xref: /openbmc/linux/fs/nfs/nfs4proc.c (revision 56e4ebf877b6043c289bda32a5a7385b80c17dee)
1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/slab.h>
43 #include <linux/sunrpc/clnt.h>
44 #include <linux/nfs.h>
45 #include <linux/nfs4.h>
46 #include <linux/nfs_fs.h>
47 #include <linux/nfs_page.h>
48 #include <linux/namei.h>
49 #include <linux/mount.h>
50 #include <linux/module.h>
51 #include <linux/sunrpc/bc_xprt.h>
52 
53 #include "nfs4_fs.h"
54 #include "delegation.h"
55 #include "internal.h"
56 #include "iostat.h"
57 #include "callback.h"
58 
59 #define NFSDBG_FACILITY		NFSDBG_PROC
60 
61 #define NFS4_POLL_RETRY_MIN	(HZ/10)
62 #define NFS4_POLL_RETRY_MAX	(15*HZ)
63 
64 #define NFS4_MAX_LOOP_ON_RECOVER (10)
65 
66 struct nfs4_opendata;
67 static int _nfs4_proc_open(struct nfs4_opendata *data);
68 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
69 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
70 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
71 static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
72 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
73 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
74 			    struct nfs_fattr *fattr, struct iattr *sattr,
75 			    struct nfs4_state *state);
76 
77 /* Prevent leaks of NFSv4 errors into userland */
78 static int nfs4_map_errors(int err)
79 {
80 	if (err >= -1000)
81 		return err;
82 	switch (err) {
83 	case -NFS4ERR_RESOURCE:
84 		return -EREMOTEIO;
85 	default:
86 		dprintk("%s could not handle NFSv4 error %d\n",
87 				__func__, -err);
88 		break;
89 	}
90 	return -EIO;
91 }
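
/*
 * The NFSv4-specific error codes start at 10001 (e.g. NFS4ERR_BADHANDLE),
 * while the remaining NFS4ERR_* values share ordinary errno numbers, so
 * anything in the normal errno range (>= -1000) is passed through
 * untouched.  Of the protocol-only errors, just NFS4ERR_RESOURCE gets a
 * dedicated mapping (-EREMOTEIO); the rest are reported as -EIO.
 */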
92 
93 /*
94  * This is our standard bitmap for GETATTR requests.
95  */
96 const u32 nfs4_fattr_bitmap[2] = {
97 	FATTR4_WORD0_TYPE
98 	| FATTR4_WORD0_CHANGE
99 	| FATTR4_WORD0_SIZE
100 	| FATTR4_WORD0_FSID
101 	| FATTR4_WORD0_FILEID,
102 	FATTR4_WORD1_MODE
103 	| FATTR4_WORD1_NUMLINKS
104 	| FATTR4_WORD1_OWNER
105 	| FATTR4_WORD1_OWNER_GROUP
106 	| FATTR4_WORD1_RAWDEV
107 	| FATTR4_WORD1_SPACE_USED
108 	| FATTR4_WORD1_TIME_ACCESS
109 	| FATTR4_WORD1_TIME_METADATA
110 	| FATTR4_WORD1_TIME_MODIFY
111 };
112 
113 const u32 nfs4_statfs_bitmap[2] = {
114 	FATTR4_WORD0_FILES_AVAIL
115 	| FATTR4_WORD0_FILES_FREE
116 	| FATTR4_WORD0_FILES_TOTAL,
117 	FATTR4_WORD1_SPACE_AVAIL
118 	| FATTR4_WORD1_SPACE_FREE
119 	| FATTR4_WORD1_SPACE_TOTAL
120 };
121 
122 const u32 nfs4_pathconf_bitmap[2] = {
123 	FATTR4_WORD0_MAXLINK
124 	| FATTR4_WORD0_MAXNAME,
125 	0
126 };
127 
128 const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
129 			| FATTR4_WORD0_MAXREAD
130 			| FATTR4_WORD0_MAXWRITE
131 			| FATTR4_WORD0_LEASE_TIME,
132 			0
133 };
134 
135 const u32 nfs4_fs_locations_bitmap[2] = {
136 	FATTR4_WORD0_TYPE
137 	| FATTR4_WORD0_CHANGE
138 	| FATTR4_WORD0_SIZE
139 	| FATTR4_WORD0_FSID
140 	| FATTR4_WORD0_FILEID
141 	| FATTR4_WORD0_FS_LOCATIONS,
142 	FATTR4_WORD1_MODE
143 	| FATTR4_WORD1_NUMLINKS
144 	| FATTR4_WORD1_OWNER
145 	| FATTR4_WORD1_OWNER_GROUP
146 	| FATTR4_WORD1_RAWDEV
147 	| FATTR4_WORD1_SPACE_USED
148 	| FATTR4_WORD1_TIME_ACCESS
149 	| FATTR4_WORD1_TIME_METADATA
150 	| FATTR4_WORD1_TIME_MODIFY
151 	| FATTR4_WORD1_MOUNTED_ON_FILEID
152 };
153 
154 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
155 		struct nfs4_readdir_arg *readdir)
156 {
157 	__be32 *start, *p;
158 
159 	BUG_ON(readdir->count < 80);
160 	if (cookie > 2) {
161 		readdir->cookie = cookie;
162 		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
163 		return;
164 	}
165 
166 	readdir->cookie = 0;
167 	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
168 	if (cookie == 2)
169 		return;
170 
171 	/*
172 	 * NFSv4 servers do not return entries for '.' and '..'
173 	 * Therefore, we fake these entries here.  We let '.'
174 	 * have cookie 0 and '..' have cookie 1.  Note that
175 	 * when talking to the server, we always send cookie 0
176 	 * instead of 1 or 2.
177 	 */
178 	start = p = kmap_atomic(*readdir->pages, KM_USER0);
179 
180 	if (cookie == 0) {
181 		*p++ = xdr_one;                                  /* next */
182 		*p++ = xdr_zero;                   /* cookie, first word */
183 		*p++ = xdr_one;                   /* cookie, second word */
184 		*p++ = xdr_one;                             /* entry len */
185 		memcpy(p, ".\0\0\0", 4);                        /* entry */
186 		p++;
187 		*p++ = xdr_one;                         /* bitmap length */
188 		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
189 		*p++ = htonl(8);              /* attribute buffer length */
190 		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
191 	}
192 
193 	*p++ = xdr_one;                                  /* next */
194 	*p++ = xdr_zero;                   /* cookie, first word */
195 	*p++ = xdr_two;                   /* cookie, second word */
196 	*p++ = xdr_two;                             /* entry len */
197 	memcpy(p, "..\0\0", 4);                         /* entry */
198 	p++;
199 	*p++ = xdr_one;                         /* bitmap length */
200 	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
201 	*p++ = htonl(8);              /* attribute buffer length */
202 	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
203 
204 	readdir->pgbase = (char *)p - (char *)start;
205 	readdir->count -= readdir->pgbase;
206 	kunmap_atomic(start, KM_USER0);
207 }
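
/*
 * The fabricated reply for a cookie-0 READDIR therefore carries two
 * entries laid out roughly as follows (illustrative sketch; names and
 * attribute data are XDR-padded as usual):
 *
 *	next=1 | cookie=1 | name="."  | bitmap={FILEID} | attrlen=8 | fileid of '.'
 *	next=1 | cookie=2 | name=".." | bitmap={FILEID} | attrlen=8 | fileid of '..'
 *
 * Each fake entry exposes only the FILEID attribute; a subsequent
 * readdir resuming at cookie 1 skips '.', and one resuming at cookie 2
 * skips both fake entries.
 */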
208 
209 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
210 {
211 	int res;
212 
213 	might_sleep();
214 
215 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
216 			nfs_wait_bit_killable, TASK_KILLABLE);
217 	return res;
218 }
219 
220 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
221 {
222 	int res = 0;
223 
224 	might_sleep();
225 
226 	if (*timeout <= 0)
227 		*timeout = NFS4_POLL_RETRY_MIN;
228 	if (*timeout > NFS4_POLL_RETRY_MAX)
229 		*timeout = NFS4_POLL_RETRY_MAX;
230 	schedule_timeout_killable(*timeout);
231 	if (fatal_signal_pending(current))
232 		res = -ERESTARTSYS;
233 	*timeout <<= 1;
234 	return res;
235 }
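
/*
 * The wait starts at NFS4_POLL_RETRY_MIN and doubles after every call,
 * clamped to NFS4_POLL_RETRY_MAX.  Assuming HZ=1000, for example, the
 * resulting sleeps are roughly 100ms, 200ms, 400ms, ... up to a 15
 * second ceiling, after which each further retry waits 15 seconds.
 */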
236 
237 /* This is the error handling routine for processes that are allowed
238  * to sleep.
239  */
240 static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
241 {
242 	struct nfs_client *clp = server->nfs_client;
243 	struct nfs4_state *state = exception->state;
244 	int ret = errorcode;
245 
246 	exception->retry = 0;
247 	switch (errorcode) {
248 		case 0:
249 			return 0;
250 		case -NFS4ERR_ADMIN_REVOKED:
251 		case -NFS4ERR_BAD_STATEID:
252 		case -NFS4ERR_OPENMODE:
253 			if (state == NULL)
254 				break;
255 			nfs4_state_mark_reclaim_nograce(clp, state);
256 			goto do_state_recovery;
257 		case -NFS4ERR_STALE_STATEID:
258 		case -NFS4ERR_STALE_CLIENTID:
259 		case -NFS4ERR_EXPIRED:
260 			goto do_state_recovery;
261 #if defined(CONFIG_NFS_V4_1)
262 		case -NFS4ERR_BADSESSION:
263 		case -NFS4ERR_BADSLOT:
264 		case -NFS4ERR_BAD_HIGH_SLOT:
265 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
266 		case -NFS4ERR_DEADSESSION:
267 		case -NFS4ERR_SEQ_FALSE_RETRY:
268 		case -NFS4ERR_SEQ_MISORDERED:
269 			dprintk("%s ERROR: %d Reset session\n", __func__,
270 				errorcode);
271 			nfs4_schedule_state_recovery(clp);
272 			exception->retry = 1;
273 			break;
274 #endif /* defined(CONFIG_NFS_V4_1) */
275 		case -NFS4ERR_FILE_OPEN:
276 			if (exception->timeout > HZ) {
277 				/* We have retried a decent amount, time to
278 				 * fail
279 				 */
280 				ret = -EBUSY;
281 				break;
282 			}
283 		case -NFS4ERR_GRACE:
284 		case -NFS4ERR_DELAY:
285 		case -EKEYEXPIRED:
286 			ret = nfs4_delay(server->client, &exception->timeout);
287 			if (ret != 0)
288 				break;
289 		case -NFS4ERR_OLD_STATEID:
290 			exception->retry = 1;
291 	}
292 	/* We failed to handle the error */
293 	return nfs4_map_errors(ret);
294 do_state_recovery:
295 	nfs4_schedule_state_recovery(clp);
296 	ret = nfs4_wait_clnt_recover(clp);
297 	if (ret == 0)
298 		exception->retry = 1;
299 	return ret;
300 }
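
/*
 * Callers typically drive this from a retry loop of the form below
 * (minimal sketch; _nfs4_do_something() stands in for the raw
 * operation being wrapped):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_do_something(inode),
 *				&exception);
 *	} while (exception.retry);
 *	return err;
 */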
301 
302 
303 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
304 {
305 	spin_lock(&clp->cl_lock);
306 	if (time_before(clp->cl_last_renewal, timestamp))
307 		clp->cl_last_renewal = timestamp;
308 	spin_unlock(&clp->cl_lock);
309 }
310 
311 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
312 {
313 	do_renew_lease(server->nfs_client, timestamp);
314 }
315 
316 #if defined(CONFIG_NFS_V4_1)
317 
318 /*
319  * nfs4_free_slot - free a slot and efficiently update slot table.
320  *
321  * Freeing a slot is trivially done by clearing its respective bit
322  * in the bitmap.
323  * If the freed slotid equals highest_used_slotid we want to update it
324  * so that the server can size down the slot table if needed;
325  * otherwise we know that the highest_used_slotid is still in use.
326  * When updating highest_used_slotid there may be "holes" in the bitmap
327  * so we need to scan down from highest_used_slotid to 0 looking for the now
328  * highest slotid in use.
329  * If none found, highest_used_slotid is set to -1.
330  *
331  * Must be called while holding tbl->slot_tbl_lock
332  */
333 static void
334 nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
335 {
336 	int free_slotid = free_slot - tbl->slots;
337 	int slotid = free_slotid;
338 
339 	BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
340 	/* clear used bit in bitmap */
341 	__clear_bit(slotid, tbl->used_slots);
342 
343 	/* update highest_used_slotid when it is freed */
344 	if (slotid == tbl->highest_used_slotid) {
345 		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
346 		if (slotid < tbl->max_slots)
347 			tbl->highest_used_slotid = slotid;
348 		else
349 			tbl->highest_used_slotid = -1;
350 	}
351 	dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
352 		free_slotid, tbl->highest_used_slotid);
353 }
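
/*
 * Worked example: with max_slots = 4, used_slots = 0b1011 and
 * highest_used_slotid = 3, freeing slot 3 clears bit 3 and the
 * find_last_bit() scan locates bit 1, so highest_used_slotid drops to 1.
 * Freeing slot 0 afterwards leaves it at 1, since only freeing the
 * current highest slot triggers a rescan.  Once the last used slot is
 * freed the scan finds nothing below max_slots and highest_used_slotid
 * becomes -1.
 */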
354 
355 /*
356  * Signal state manager thread if session is drained
357  */
358 static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
359 {
360 	struct rpc_task *task;
361 
362 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
363 		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
364 		if (task)
365 			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
366 		return;
367 	}
368 
369 	if (ses->fc_slot_table.highest_used_slotid != -1)
370 		return;
371 
372 	dprintk("%s COMPLETE: Session Drained\n", __func__);
373 	complete(&ses->complete);
374 }
375 
376 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
377 {
378 	struct nfs4_slot_table *tbl;
379 
380 	tbl = &res->sr_session->fc_slot_table;
381 	if (!res->sr_slot) {
382 		/* Just wake up the next waiter, since
383 		 * we may not have consumed a slot after all */
384 		dprintk("%s: No slot\n", __func__);
385 		return;
386 	}
387 
388 	spin_lock(&tbl->slot_tbl_lock);
389 	nfs4_free_slot(tbl, res->sr_slot);
390 	nfs41_check_drain_session_complete(res->sr_session);
391 	spin_unlock(&tbl->slot_tbl_lock);
392 	res->sr_slot = NULL;
393 }
394 
395 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
396 {
397 	unsigned long timestamp;
398 	struct nfs_client *clp;
399 
400 	/*
401 	 * sr_status remains 1 if an RPC level error occurred. The server
402 	 * may or may not have processed the sequence operation.
403 	 * Proceed as if the server received and processed the sequence
404 	 * operation.
405 	 */
406 	if (res->sr_status == 1)
407 		res->sr_status = NFS_OK;
408 
409 	/* -ERESTARTSYS can result in skipping nfs41_setup_sequence */
410 	if (!res->sr_slot)
411 		goto out;
412 
413 	/* Check the SEQUENCE operation status */
414 	switch (res->sr_status) {
415 	case 0:
416 		/* Update the slot's sequence and clientid lease timer */
417 		++res->sr_slot->seq_nr;
418 		timestamp = res->sr_renewal_time;
419 		clp = res->sr_session->clp;
420 		do_renew_lease(clp, timestamp);
421 		/* Check sequence flags */
422 		if (atomic_read(&clp->cl_count) > 1)
423 			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
424 		break;
425 	case -NFS4ERR_DELAY:
426 		/* The server detected a resend of the RPC call and
427 		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
428 		 * of RFC5661.
429 		 */
430 		dprintk("%s: slot=%ld seq=%d: Operation in progress\n",
431 			__func__,
432 			res->sr_slot - res->sr_session->fc_slot_table.slots,
433 			res->sr_slot->seq_nr);
434 		goto out_retry;
435 	default:
436 		/* Just update the slot sequence no. */
437 		++res->sr_slot->seq_nr;
438 	}
439 out:
440 	/* The session may be reset by one of the error handlers. */
441 	dprintk("%s: Error %d, freeing slot\n", __func__, res->sr_status);
442 	nfs41_sequence_free_slot(res);
443 	return 1;
444 out_retry:
445 	if (!rpc_restart_call(task))
446 		goto out;
447 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
448 	return 0;
449 }
450 
451 static int nfs4_sequence_done(struct rpc_task *task,
452 			       struct nfs4_sequence_res *res)
453 {
454 	if (res->sr_session == NULL)
455 		return 1;
456 	return nfs41_sequence_done(task, res);
457 }
458 
459 /*
460  * nfs4_find_slot - efficiently look for a free slot
461  *
462  * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
463  * If found, we mark the slot as used and update the highest_used_slotid
464  * so that the caller can set up the sequence operation args.
465  * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
466  *
467  * Note: must be called while holding the slot_tbl_lock.
468  */
469 static u8
470 nfs4_find_slot(struct nfs4_slot_table *tbl)
471 {
472 	int slotid;
473 	u8 ret_id = NFS4_MAX_SLOT_TABLE;
474 	BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
475 
476 	dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
477 		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
478 		tbl->max_slots);
479 	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
480 	if (slotid >= tbl->max_slots)
481 		goto out;
482 	__set_bit(slotid, tbl->used_slots);
483 	if (slotid > tbl->highest_used_slotid)
484 		tbl->highest_used_slotid = slotid;
485 	ret_id = slotid;
486 out:
487 	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d\n",
488 		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
489 	return ret_id;
490 }
491 
492 static int nfs41_setup_sequence(struct nfs4_session *session,
493 				struct nfs4_sequence_args *args,
494 				struct nfs4_sequence_res *res,
495 				int cache_reply,
496 				struct rpc_task *task)
497 {
498 	struct nfs4_slot *slot;
499 	struct nfs4_slot_table *tbl;
500 	u8 slotid;
501 
502 	dprintk("--> %s\n", __func__);
503 	/* slot already allocated? */
504 	if (res->sr_slot != NULL)
505 		return 0;
506 
507 	tbl = &session->fc_slot_table;
508 
509 	spin_lock(&tbl->slot_tbl_lock);
510 	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
511 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
512 		/*
513 		 * The state manager will wait until the slot table is empty.
514 		 * Schedule the reset thread
515 		 */
516 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
517 		spin_unlock(&tbl->slot_tbl_lock);
518 		dprintk("%s Schedule Session Reset\n", __func__);
519 		return -EAGAIN;
520 	}
521 
522 	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
523 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
524 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
525 		spin_unlock(&tbl->slot_tbl_lock);
526 		dprintk("%s enforce FIFO order\n", __func__);
527 		return -EAGAIN;
528 	}
529 
530 	slotid = nfs4_find_slot(tbl);
531 	if (slotid == NFS4_MAX_SLOT_TABLE) {
532 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
533 		spin_unlock(&tbl->slot_tbl_lock);
534 		dprintk("<-- %s: no free slots\n", __func__);
535 		return -EAGAIN;
536 	}
537 	spin_unlock(&tbl->slot_tbl_lock);
538 
539 	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
540 	slot = tbl->slots + slotid;
541 	args->sa_session = session;
542 	args->sa_slotid = slotid;
543 	args->sa_cache_this = cache_reply;
544 
545 	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
546 
547 	res->sr_session = session;
548 	res->sr_slot = slot;
549 	res->sr_renewal_time = jiffies;
550 	res->sr_status_flags = 0;
551 	/*
552 	 * sr_status is only set in decode_sequence, and so will remain
553 	 * set to 1 if an rpc level failure occurs.
554 	 */
555 	res->sr_status = 1;
556 	return 0;
557 }
558 
559 int nfs4_setup_sequence(const struct nfs_server *server,
560 			struct nfs4_sequence_args *args,
561 			struct nfs4_sequence_res *res,
562 			int cache_reply,
563 			struct rpc_task *task)
564 {
565 	struct nfs4_session *session = nfs4_get_session(server);
566 	int ret = 0;
567 
568 	if (session == NULL) {
569 		args->sa_session = NULL;
570 		res->sr_session = NULL;
571 		goto out;
572 	}
573 
574 	dprintk("--> %s clp %p session %p sr_slot %ld\n",
575 		__func__, session->clp, session, res->sr_slot ?
576 			res->sr_slot - session->fc_slot_table.slots : -1);
577 
578 	ret = nfs41_setup_sequence(session, args, res, cache_reply,
579 				   task);
580 out:
581 	dprintk("<-- %s status=%d\n", __func__, ret);
582 	return ret;
583 }
584 
585 struct nfs41_call_sync_data {
586 	const struct nfs_server *seq_server;
587 	struct nfs4_sequence_args *seq_args;
588 	struct nfs4_sequence_res *seq_res;
589 	int cache_reply;
590 };
591 
592 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
593 {
594 	struct nfs41_call_sync_data *data = calldata;
595 
596 	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
597 
598 	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
599 				data->seq_res, data->cache_reply, task))
600 		return;
601 	rpc_call_start(task);
602 }
603 
604 static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
605 {
606 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
607 	nfs41_call_sync_prepare(task, calldata);
608 }
609 
610 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
611 {
612 	struct nfs41_call_sync_data *data = calldata;
613 
614 	nfs41_sequence_done(task, data->seq_res);
615 }
616 
617 struct rpc_call_ops nfs41_call_sync_ops = {
618 	.rpc_call_prepare = nfs41_call_sync_prepare,
619 	.rpc_call_done = nfs41_call_sync_done,
620 };
621 
622 struct rpc_call_ops nfs41_call_priv_sync_ops = {
623 	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
624 	.rpc_call_done = nfs41_call_sync_done,
625 };
626 
627 static int nfs4_call_sync_sequence(struct nfs_server *server,
628 				   struct rpc_message *msg,
629 				   struct nfs4_sequence_args *args,
630 				   struct nfs4_sequence_res *res,
631 				   int cache_reply,
632 				   int privileged)
633 {
634 	int ret;
635 	struct rpc_task *task;
636 	struct nfs41_call_sync_data data = {
637 		.seq_server = server,
638 		.seq_args = args,
639 		.seq_res = res,
640 		.cache_reply = cache_reply,
641 	};
642 	struct rpc_task_setup task_setup = {
643 		.rpc_client = server->client,
644 		.rpc_message = msg,
645 		.callback_ops = &nfs41_call_sync_ops,
646 		.callback_data = &data
647 	};
648 
649 	res->sr_slot = NULL;
650 	if (privileged)
651 		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
652 	task = rpc_run_task(&task_setup);
653 	if (IS_ERR(task))
654 		ret = PTR_ERR(task);
655 	else {
656 		ret = task->tk_status;
657 		rpc_put_task(task);
658 	}
659 	return ret;
660 }
661 
662 int _nfs4_call_sync_session(struct nfs_server *server,
663 			    struct rpc_message *msg,
664 			    struct nfs4_sequence_args *args,
665 			    struct nfs4_sequence_res *res,
666 			    int cache_reply)
667 {
668 	return nfs4_call_sync_sequence(server, msg, args, res, cache_reply, 0);
669 }
670 
671 #else
672 static int nfs4_sequence_done(struct rpc_task *task,
673 			       struct nfs4_sequence_res *res)
674 {
675 	return 1;
676 }
677 #endif /* CONFIG_NFS_V4_1 */
678 
679 int _nfs4_call_sync(struct nfs_server *server,
680 		    struct rpc_message *msg,
681 		    struct nfs4_sequence_args *args,
682 		    struct nfs4_sequence_res *res,
683 		    int cache_reply)
684 {
685 	args->sa_session = res->sr_session = NULL;
686 	return rpc_call_sync(server->client, msg, 0);
687 }
688 
689 #define nfs4_call_sync(server, msg, args, res, cache_reply) \
690 	(server)->nfs_client->cl_mvops->call_sync((server), (msg), &(args)->seq_args, \
691 			&(res)->seq_res, (cache_reply))
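
/*
 * cl_mvops->call_sync is the minor-version hook for synchronous
 * compounds: for NFSv4.0 it is expected to point at _nfs4_call_sync(),
 * which simply clears the session fields and calls rpc_call_sync(),
 * while for NFSv4.1 it is expected to point at _nfs4_call_sync_session(),
 * which reserves a session slot through the SEQUENCE machinery above
 * before the compound is sent.
 */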
692 
693 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
694 {
695 	struct nfs_inode *nfsi = NFS_I(dir);
696 
697 	spin_lock(&dir->i_lock);
698 	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
699 	if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
700 		nfs_force_lookup_revalidate(dir);
701 	nfsi->change_attr = cinfo->after;
702 	spin_unlock(&dir->i_lock);
703 }
704 
705 struct nfs4_opendata {
706 	struct kref kref;
707 	struct nfs_openargs o_arg;
708 	struct nfs_openres o_res;
709 	struct nfs_open_confirmargs c_arg;
710 	struct nfs_open_confirmres c_res;
711 	struct nfs_fattr f_attr;
712 	struct nfs_fattr dir_attr;
713 	struct path path;
714 	struct dentry *dir;
715 	struct nfs4_state_owner *owner;
716 	struct nfs4_state *state;
717 	struct iattr attrs;
718 	unsigned long timestamp;
719 	unsigned int rpc_done : 1;
720 	int rpc_status;
721 	int cancelled;
722 };
723 
724 
725 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
726 {
727 	p->o_res.f_attr = &p->f_attr;
728 	p->o_res.dir_attr = &p->dir_attr;
729 	p->o_res.seqid = p->o_arg.seqid;
730 	p->c_res.seqid = p->c_arg.seqid;
731 	p->o_res.server = p->o_arg.server;
732 	nfs_fattr_init(&p->f_attr);
733 	nfs_fattr_init(&p->dir_attr);
734 }
735 
736 static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
737 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
738 		const struct iattr *attrs,
739 		gfp_t gfp_mask)
740 {
741 	struct dentry *parent = dget_parent(path->dentry);
742 	struct inode *dir = parent->d_inode;
743 	struct nfs_server *server = NFS_SERVER(dir);
744 	struct nfs4_opendata *p;
745 
746 	p = kzalloc(sizeof(*p), gfp_mask);
747 	if (p == NULL)
748 		goto err;
749 	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
750 	if (p->o_arg.seqid == NULL)
751 		goto err_free;
752 	path_get(path);
753 	p->path = *path;
754 	p->dir = parent;
755 	p->owner = sp;
756 	atomic_inc(&sp->so_count);
757 	p->o_arg.fh = NFS_FH(dir);
758 	p->o_arg.open_flags = flags;
759 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
760 	p->o_arg.clientid = server->nfs_client->cl_clientid;
761 	p->o_arg.id = sp->so_owner_id.id;
762 	p->o_arg.name = &p->path.dentry->d_name;
763 	p->o_arg.server = server;
764 	p->o_arg.bitmask = server->attr_bitmask;
765 	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
766 	if (flags & O_CREAT) {
767 		u32 *s;
768 
769 		p->o_arg.u.attrs = &p->attrs;
770 		memcpy(&p->attrs, attrs, sizeof(p->attrs));
771 		s = (u32 *) p->o_arg.u.verifier.data;
772 		s[0] = jiffies;
773 		s[1] = current->pid;
774 	}
775 	p->c_arg.fh = &p->o_res.fh;
776 	p->c_arg.stateid = &p->o_res.stateid;
777 	p->c_arg.seqid = p->o_arg.seqid;
778 	nfs4_init_opendata_res(p);
779 	kref_init(&p->kref);
780 	return p;
781 err_free:
782 	kfree(p);
783 err:
784 	dput(parent);
785 	return NULL;
786 }
787 
788 static void nfs4_opendata_free(struct kref *kref)
789 {
790 	struct nfs4_opendata *p = container_of(kref,
791 			struct nfs4_opendata, kref);
792 
793 	nfs_free_seqid(p->o_arg.seqid);
794 	if (p->state != NULL)
795 		nfs4_put_open_state(p->state);
796 	nfs4_put_state_owner(p->owner);
797 	dput(p->dir);
798 	path_put(&p->path);
799 	kfree(p);
800 }
801 
802 static void nfs4_opendata_put(struct nfs4_opendata *p)
803 {
804 	if (p != NULL)
805 		kref_put(&p->kref, nfs4_opendata_free);
806 }
807 
808 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
809 {
810 	int ret;
811 
812 	ret = rpc_wait_for_completion_task(task);
813 	return ret;
814 }
815 
816 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
817 {
818 	int ret = 0;
819 
820 	if (open_mode & O_EXCL)
821 		goto out;
822 	switch (mode & (FMODE_READ|FMODE_WRITE)) {
823 		case FMODE_READ:
824 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
825 				&& state->n_rdonly != 0;
826 			break;
827 		case FMODE_WRITE:
828 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
829 				&& state->n_wronly != 0;
830 			break;
831 		case FMODE_READ|FMODE_WRITE:
832 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
833 				&& state->n_rdwr != 0;
834 	}
835 out:
836 	return ret;
837 }
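
/*
 * For example, a plain read-only open can be satisfied from the cache
 * when NFS_O_RDONLY_STATE is set and state->n_rdonly is non-zero, i.e.
 * the state already covers at least one read-only opener.  An O_EXCL
 * open is never served from the cache and always results in an OPEN
 * call to the server.
 */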
838 
839 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
840 {
841 	if ((delegation->type & fmode) != fmode)
842 		return 0;
843 	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
844 		return 0;
845 	nfs_mark_delegation_referenced(delegation);
846 	return 1;
847 }
848 
849 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
850 {
851 	switch (fmode) {
852 		case FMODE_WRITE:
853 			state->n_wronly++;
854 			break;
855 		case FMODE_READ:
856 			state->n_rdonly++;
857 			break;
858 		case FMODE_READ|FMODE_WRITE:
859 			state->n_rdwr++;
860 	}
861 	nfs4_state_set_mode_locked(state, state->state | fmode);
862 }
863 
864 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
865 {
866 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
867 		memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
868 	memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
869 	switch (fmode) {
870 		case FMODE_READ:
871 			set_bit(NFS_O_RDONLY_STATE, &state->flags);
872 			break;
873 		case FMODE_WRITE:
874 			set_bit(NFS_O_WRONLY_STATE, &state->flags);
875 			break;
876 		case FMODE_READ|FMODE_WRITE:
877 			set_bit(NFS_O_RDWR_STATE, &state->flags);
878 	}
879 }
880 
881 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
882 {
883 	write_seqlock(&state->seqlock);
884 	nfs_set_open_stateid_locked(state, stateid, fmode);
885 	write_sequnlock(&state->seqlock);
886 }
887 
888 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
889 {
890 	/*
891 	 * Protect the call to nfs4_state_set_mode_locked and
892 	 * serialise the stateid update
893 	 */
894 	write_seqlock(&state->seqlock);
895 	if (deleg_stateid != NULL) {
896 		memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
897 		set_bit(NFS_DELEGATED_STATE, &state->flags);
898 	}
899 	if (open_stateid != NULL)
900 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
901 	write_sequnlock(&state->seqlock);
902 	spin_lock(&state->owner->so_lock);
903 	update_open_stateflags(state, fmode);
904 	spin_unlock(&state->owner->so_lock);
905 }
906 
907 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
908 {
909 	struct nfs_inode *nfsi = NFS_I(state->inode);
910 	struct nfs_delegation *deleg_cur;
911 	int ret = 0;
912 
913 	fmode &= (FMODE_READ|FMODE_WRITE);
914 
915 	rcu_read_lock();
916 	deleg_cur = rcu_dereference(nfsi->delegation);
917 	if (deleg_cur == NULL)
918 		goto no_delegation;
919 
920 	spin_lock(&deleg_cur->lock);
921 	if (nfsi->delegation != deleg_cur ||
922 	    (deleg_cur->type & fmode) != fmode)
923 		goto no_delegation_unlock;
924 
925 	if (delegation == NULL)
926 		delegation = &deleg_cur->stateid;
927 	else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
928 		goto no_delegation_unlock;
929 
930 	nfs_mark_delegation_referenced(deleg_cur);
931 	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
932 	ret = 1;
933 no_delegation_unlock:
934 	spin_unlock(&deleg_cur->lock);
935 no_delegation:
936 	rcu_read_unlock();
937 
938 	if (!ret && open_stateid != NULL) {
939 		__update_open_stateid(state, open_stateid, NULL, fmode);
940 		ret = 1;
941 	}
942 
943 	return ret;
944 }
945 
946 
947 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
948 {
949 	struct nfs_delegation *delegation;
950 
951 	rcu_read_lock();
952 	delegation = rcu_dereference(NFS_I(inode)->delegation);
953 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
954 		rcu_read_unlock();
955 		return;
956 	}
957 	rcu_read_unlock();
958 	nfs_inode_return_delegation(inode);
959 }
960 
961 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
962 {
963 	struct nfs4_state *state = opendata->state;
964 	struct nfs_inode *nfsi = NFS_I(state->inode);
965 	struct nfs_delegation *delegation;
966 	int open_mode = opendata->o_arg.open_flags & O_EXCL;
967 	fmode_t fmode = opendata->o_arg.fmode;
968 	nfs4_stateid stateid;
969 	int ret = -EAGAIN;
970 
971 	for (;;) {
972 		if (can_open_cached(state, fmode, open_mode)) {
973 			spin_lock(&state->owner->so_lock);
974 			if (can_open_cached(state, fmode, open_mode)) {
975 				update_open_stateflags(state, fmode);
976 				spin_unlock(&state->owner->so_lock);
977 				goto out_return_state;
978 			}
979 			spin_unlock(&state->owner->so_lock);
980 		}
981 		rcu_read_lock();
982 		delegation = rcu_dereference(nfsi->delegation);
983 		if (delegation == NULL ||
984 		    !can_open_delegated(delegation, fmode)) {
985 			rcu_read_unlock();
986 			break;
987 		}
988 		/* Save the delegation */
989 		memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
990 		rcu_read_unlock();
991 		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
992 		if (ret != 0)
993 			goto out;
994 		ret = -EAGAIN;
995 
996 		/* Try to update the stateid using the delegation */
997 		if (update_open_stateid(state, NULL, &stateid, fmode))
998 			goto out_return_state;
999 	}
1000 out:
1001 	return ERR_PTR(ret);
1002 out_return_state:
1003 	atomic_inc(&state->count);
1004 	return state;
1005 }
1006 
1007 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1008 {
1009 	struct inode *inode;
1010 	struct nfs4_state *state = NULL;
1011 	struct nfs_delegation *delegation;
1012 	int ret;
1013 
1014 	if (!data->rpc_done) {
1015 		state = nfs4_try_open_cached(data);
1016 		goto out;
1017 	}
1018 
1019 	ret = -EAGAIN;
1020 	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1021 		goto err;
1022 	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1023 	ret = PTR_ERR(inode);
1024 	if (IS_ERR(inode))
1025 		goto err;
1026 	ret = -ENOMEM;
1027 	state = nfs4_get_open_state(inode, data->owner);
1028 	if (state == NULL)
1029 		goto err_put_inode;
1030 	if (data->o_res.delegation_type != 0) {
1031 		int delegation_flags = 0;
1032 
1033 		rcu_read_lock();
1034 		delegation = rcu_dereference(NFS_I(inode)->delegation);
1035 		if (delegation)
1036 			delegation_flags = delegation->flags;
1037 		rcu_read_unlock();
1038 		if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1039 			nfs_inode_set_delegation(state->inode,
1040 					data->owner->so_cred,
1041 					&data->o_res);
1042 		else
1043 			nfs_inode_reclaim_delegation(state->inode,
1044 					data->owner->so_cred,
1045 					&data->o_res);
1046 	}
1047 
1048 	update_open_stateid(state, &data->o_res.stateid, NULL,
1049 			data->o_arg.fmode);
1050 	iput(inode);
1051 out:
1052 	return state;
1053 err_put_inode:
1054 	iput(inode);
1055 err:
1056 	return ERR_PTR(ret);
1057 }
1058 
1059 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1060 {
1061 	struct nfs_inode *nfsi = NFS_I(state->inode);
1062 	struct nfs_open_context *ctx;
1063 
1064 	spin_lock(&state->inode->i_lock);
1065 	list_for_each_entry(ctx, &nfsi->open_files, list) {
1066 		if (ctx->state != state)
1067 			continue;
1068 		get_nfs_open_context(ctx);
1069 		spin_unlock(&state->inode->i_lock);
1070 		return ctx;
1071 	}
1072 	spin_unlock(&state->inode->i_lock);
1073 	return ERR_PTR(-ENOENT);
1074 }
1075 
1076 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1077 {
1078 	struct nfs4_opendata *opendata;
1079 
1080 	opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS);
1081 	if (opendata == NULL)
1082 		return ERR_PTR(-ENOMEM);
1083 	opendata->state = state;
1084 	atomic_inc(&state->count);
1085 	return opendata;
1086 }
1087 
1088 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1089 {
1090 	struct nfs4_state *newstate;
1091 	int ret;
1092 
1093 	opendata->o_arg.open_flags = 0;
1094 	opendata->o_arg.fmode = fmode;
1095 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1096 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1097 	nfs4_init_opendata_res(opendata);
1098 	ret = _nfs4_recover_proc_open(opendata);
1099 	if (ret != 0)
1100 		return ret;
1101 	newstate = nfs4_opendata_to_nfs4_state(opendata);
1102 	if (IS_ERR(newstate))
1103 		return PTR_ERR(newstate);
1104 	nfs4_close_state(&opendata->path, newstate, fmode);
1105 	*res = newstate;
1106 	return 0;
1107 }
1108 
1109 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1110 {
1111 	struct nfs4_state *newstate;
1112 	int ret;
1113 
1114 	/* memory barrier prior to reading state->n_* */
1115 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1116 	smp_rmb();
1117 	if (state->n_rdwr != 0) {
1118 		clear_bit(NFS_O_RDWR_STATE, &state->flags);
1119 		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1120 		if (ret != 0)
1121 			return ret;
1122 		if (newstate != state)
1123 			return -ESTALE;
1124 	}
1125 	if (state->n_wronly != 0) {
1126 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1127 		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1128 		if (ret != 0)
1129 			return ret;
1130 		if (newstate != state)
1131 			return -ESTALE;
1132 	}
1133 	if (state->n_rdonly != 0) {
1134 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1135 		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1136 		if (ret != 0)
1137 			return ret;
1138 		if (newstate != state)
1139 			return -ESTALE;
1140 	}
1141 	/*
1142 	 * We may have performed cached opens for all three recoveries.
1143 	 * Check if we need to update the current stateid.
1144 	 */
1145 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1146 	    memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
1147 		write_seqlock(&state->seqlock);
1148 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1149 			memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
1150 		write_sequnlock(&state->seqlock);
1151 	}
1152 	return 0;
1153 }
1154 
1155 /*
1156  * OPEN_RECLAIM:
1157  * 	reclaim state on the server after a reboot.
1158  */
1159 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1160 {
1161 	struct nfs_delegation *delegation;
1162 	struct nfs4_opendata *opendata;
1163 	fmode_t delegation_type = 0;
1164 	int status;
1165 
1166 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1167 	if (IS_ERR(opendata))
1168 		return PTR_ERR(opendata);
1169 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1170 	opendata->o_arg.fh = NFS_FH(state->inode);
1171 	rcu_read_lock();
1172 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1173 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1174 		delegation_type = delegation->type;
1175 	rcu_read_unlock();
1176 	opendata->o_arg.u.delegation_type = delegation_type;
1177 	status = nfs4_open_recover(opendata, state);
1178 	nfs4_opendata_put(opendata);
1179 	return status;
1180 }
1181 
1182 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1183 {
1184 	struct nfs_server *server = NFS_SERVER(state->inode);
1185 	struct nfs4_exception exception = { };
1186 	int err;
1187 	do {
1188 		err = _nfs4_do_open_reclaim(ctx, state);
1189 		if (err != -NFS4ERR_DELAY)
1190 			break;
1191 		nfs4_handle_exception(server, err, &exception);
1192 	} while (exception.retry);
1193 	return err;
1194 }
1195 
1196 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1197 {
1198 	struct nfs_open_context *ctx;
1199 	int ret;
1200 
1201 	ctx = nfs4_state_find_open_context(state);
1202 	if (IS_ERR(ctx))
1203 		return PTR_ERR(ctx);
1204 	ret = nfs4_do_open_reclaim(ctx, state);
1205 	put_nfs_open_context(ctx);
1206 	return ret;
1207 }
1208 
1209 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1210 {
1211 	struct nfs4_opendata *opendata;
1212 	int ret;
1213 
1214 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1215 	if (IS_ERR(opendata))
1216 		return PTR_ERR(opendata);
1217 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1218 	memcpy(opendata->o_arg.u.delegation.data, stateid->data,
1219 			sizeof(opendata->o_arg.u.delegation.data));
1220 	ret = nfs4_open_recover(opendata, state);
1221 	nfs4_opendata_put(opendata);
1222 	return ret;
1223 }
1224 
1225 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1226 {
1227 	struct nfs4_exception exception = { };
1228 	struct nfs_server *server = NFS_SERVER(state->inode);
1229 	int err;
1230 	do {
1231 		err = _nfs4_open_delegation_recall(ctx, state, stateid);
1232 		switch (err) {
1233 			case 0:
1234 			case -ENOENT:
1235 			case -ESTALE:
1236 				goto out;
1237 			case -NFS4ERR_BADSESSION:
1238 			case -NFS4ERR_BADSLOT:
1239 			case -NFS4ERR_BAD_HIGH_SLOT:
1240 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1241 			case -NFS4ERR_DEADSESSION:
1242 				nfs4_schedule_state_recovery(
1243 					server->nfs_client);
1244 				goto out;
1245 			case -NFS4ERR_STALE_CLIENTID:
1246 			case -NFS4ERR_STALE_STATEID:
1247 			case -NFS4ERR_EXPIRED:
1248 				/* Don't recall a delegation if it was lost */
1249 				nfs4_schedule_state_recovery(server->nfs_client);
1250 				goto out;
1251 			case -ERESTARTSYS:
1252 				/*
1253 				 * The show must go on: exit, but mark the
1254 				 * stateid as needing recovery.
1255 				 */
1256 			case -NFS4ERR_ADMIN_REVOKED:
1257 			case -NFS4ERR_BAD_STATEID:
1258 				nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
1259 			case -EKEYEXPIRED:
1260 				/*
1261 				 * User RPCSEC_GSS context has expired.
1262 				 * We cannot recover this stateid now, so
1263 				 * skip it and allow recovery thread to
1264 				 * proceed.
1265 				 */
1266 			case -ENOMEM:
1267 				err = 0;
1268 				goto out;
1269 		}
1270 		err = nfs4_handle_exception(server, err, &exception);
1271 	} while (exception.retry);
1272 out:
1273 	return err;
1274 }
1275 
1276 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1277 {
1278 	struct nfs4_opendata *data = calldata;
1279 
1280 	data->rpc_status = task->tk_status;
1281 	if (data->rpc_status == 0) {
1282 		memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
1283 				sizeof(data->o_res.stateid.data));
1284 		nfs_confirm_seqid(&data->owner->so_seqid, 0);
1285 		renew_lease(data->o_res.server, data->timestamp);
1286 		data->rpc_done = 1;
1287 	}
1288 }
1289 
1290 static void nfs4_open_confirm_release(void *calldata)
1291 {
1292 	struct nfs4_opendata *data = calldata;
1293 	struct nfs4_state *state = NULL;
1294 
1295 	/* If this request hasn't been cancelled, do nothing */
1296 	if (data->cancelled == 0)
1297 		goto out_free;
1298 	/* In case of error, no cleanup! */
1299 	if (!data->rpc_done)
1300 		goto out_free;
1301 	state = nfs4_opendata_to_nfs4_state(data);
1302 	if (!IS_ERR(state))
1303 		nfs4_close_state(&data->path, state, data->o_arg.fmode);
1304 out_free:
1305 	nfs4_opendata_put(data);
1306 }
1307 
1308 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1309 	.rpc_call_done = nfs4_open_confirm_done,
1310 	.rpc_release = nfs4_open_confirm_release,
1311 };
1312 
1313 /*
1314  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1315  */
1316 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1317 {
1318 	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1319 	struct rpc_task *task;
1320 	struct  rpc_message msg = {
1321 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1322 		.rpc_argp = &data->c_arg,
1323 		.rpc_resp = &data->c_res,
1324 		.rpc_cred = data->owner->so_cred,
1325 	};
1326 	struct rpc_task_setup task_setup_data = {
1327 		.rpc_client = server->client,
1328 		.rpc_message = &msg,
1329 		.callback_ops = &nfs4_open_confirm_ops,
1330 		.callback_data = data,
1331 		.workqueue = nfsiod_workqueue,
1332 		.flags = RPC_TASK_ASYNC,
1333 	};
1334 	int status;
1335 
1336 	kref_get(&data->kref);
1337 	data->rpc_done = 0;
1338 	data->rpc_status = 0;
1339 	data->timestamp = jiffies;
1340 	task = rpc_run_task(&task_setup_data);
1341 	if (IS_ERR(task))
1342 		return PTR_ERR(task);
1343 	status = nfs4_wait_for_completion_rpc_task(task);
1344 	if (status != 0) {
1345 		data->cancelled = 1;
1346 		smp_wmb();
1347 	} else
1348 		status = data->rpc_status;
1349 	rpc_put_task(task);
1350 	return status;
1351 }
1352 
1353 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1354 {
1355 	struct nfs4_opendata *data = calldata;
1356 	struct nfs4_state_owner *sp = data->owner;
1357 
1358 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1359 		return;
1360 	/*
1361 	 * Check if we still need to send an OPEN call, or if we can use
1362 	 * a delegation instead.
1363 	 */
1364 	if (data->state != NULL) {
1365 		struct nfs_delegation *delegation;
1366 
1367 		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1368 			goto out_no_action;
1369 		rcu_read_lock();
1370 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1371 		if (delegation != NULL &&
1372 		    test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
1373 			rcu_read_unlock();
1374 			goto out_no_action;
1375 		}
1376 		rcu_read_unlock();
1377 	}
1378 	/* Update the open-owner id and client id. */
1379 	data->o_arg.id = sp->so_owner_id.id;
1380 	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1381 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1382 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1383 		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1384 	}
1385 	data->timestamp = jiffies;
1386 	if (nfs4_setup_sequence(data->o_arg.server,
1387 				&data->o_arg.seq_args,
1388 				&data->o_res.seq_res, 1, task))
1389 		return;
1390 	rpc_call_start(task);
1391 	return;
1392 out_no_action:
1393 	task->tk_action = NULL;
1394 
1395 }
1396 
1397 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1398 {
1399 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1400 	nfs4_open_prepare(task, calldata);
1401 }
1402 
1403 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1404 {
1405 	struct nfs4_opendata *data = calldata;
1406 
1407 	data->rpc_status = task->tk_status;
1408 
1409 	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1410 		return;
1411 
1412 	if (task->tk_status == 0) {
1413 		switch (data->o_res.f_attr->mode & S_IFMT) {
1414 			case S_IFREG:
1415 				break;
1416 			case S_IFLNK:
1417 				data->rpc_status = -ELOOP;
1418 				break;
1419 			case S_IFDIR:
1420 				data->rpc_status = -EISDIR;
1421 				break;
1422 			default:
1423 				data->rpc_status = -ENOTDIR;
1424 		}
1425 		renew_lease(data->o_res.server, data->timestamp);
1426 		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1427 			nfs_confirm_seqid(&data->owner->so_seqid, 0);
1428 	}
1429 	data->rpc_done = 1;
1430 }
1431 
1432 static void nfs4_open_release(void *calldata)
1433 {
1434 	struct nfs4_opendata *data = calldata;
1435 	struct nfs4_state *state = NULL;
1436 
1437 	/* If this request hasn't been cancelled, do nothing */
1438 	if (data->cancelled == 0)
1439 		goto out_free;
1440 	/* In case of error, no cleanup! */
1441 	if (data->rpc_status != 0 || !data->rpc_done)
1442 		goto out_free;
1443 	/* In case we need an open_confirm, no cleanup! */
1444 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1445 		goto out_free;
1446 	state = nfs4_opendata_to_nfs4_state(data);
1447 	if (!IS_ERR(state))
1448 		nfs4_close_state(&data->path, state, data->o_arg.fmode);
1449 out_free:
1450 	nfs4_opendata_put(data);
1451 }
1452 
1453 static const struct rpc_call_ops nfs4_open_ops = {
1454 	.rpc_call_prepare = nfs4_open_prepare,
1455 	.rpc_call_done = nfs4_open_done,
1456 	.rpc_release = nfs4_open_release,
1457 };
1458 
1459 static const struct rpc_call_ops nfs4_recover_open_ops = {
1460 	.rpc_call_prepare = nfs4_recover_open_prepare,
1461 	.rpc_call_done = nfs4_open_done,
1462 	.rpc_release = nfs4_open_release,
1463 };
1464 
1465 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1466 {
1467 	struct inode *dir = data->dir->d_inode;
1468 	struct nfs_server *server = NFS_SERVER(dir);
1469 	struct nfs_openargs *o_arg = &data->o_arg;
1470 	struct nfs_openres *o_res = &data->o_res;
1471 	struct rpc_task *task;
1472 	struct rpc_message msg = {
1473 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1474 		.rpc_argp = o_arg,
1475 		.rpc_resp = o_res,
1476 		.rpc_cred = data->owner->so_cred,
1477 	};
1478 	struct rpc_task_setup task_setup_data = {
1479 		.rpc_client = server->client,
1480 		.rpc_message = &msg,
1481 		.callback_ops = &nfs4_open_ops,
1482 		.callback_data = data,
1483 		.workqueue = nfsiod_workqueue,
1484 		.flags = RPC_TASK_ASYNC,
1485 	};
1486 	int status;
1487 
1488 	kref_get(&data->kref);
1489 	data->rpc_done = 0;
1490 	data->rpc_status = 0;
1491 	data->cancelled = 0;
1492 	if (isrecover)
1493 		task_setup_data.callback_ops = &nfs4_recover_open_ops;
1494 	task = rpc_run_task(&task_setup_data);
1495 	if (IS_ERR(task))
1496 		return PTR_ERR(task);
1497 	status = nfs4_wait_for_completion_rpc_task(task);
1498 	if (status != 0) {
1499 		data->cancelled = 1;
1500 		smp_wmb();
1501 	} else
1502 		status = data->rpc_status;
1503 	rpc_put_task(task);
1504 
1505 	return status;
1506 }
1507 
1508 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1509 {
1510 	struct inode *dir = data->dir->d_inode;
1511 	struct nfs_openres *o_res = &data->o_res;
1512 	int status;
1513 
1514 	status = nfs4_run_open_task(data, 1);
1515 	if (status != 0 || !data->rpc_done)
1516 		return status;
1517 
1518 	nfs_refresh_inode(dir, o_res->dir_attr);
1519 
1520 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1521 		status = _nfs4_proc_open_confirm(data);
1522 		if (status != 0)
1523 			return status;
1524 	}
1525 
1526 	return status;
1527 }
1528 
1529 /*
1530  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1531  */
1532 static int _nfs4_proc_open(struct nfs4_opendata *data)
1533 {
1534 	struct inode *dir = data->dir->d_inode;
1535 	struct nfs_server *server = NFS_SERVER(dir);
1536 	struct nfs_openargs *o_arg = &data->o_arg;
1537 	struct nfs_openres *o_res = &data->o_res;
1538 	int status;
1539 
1540 	status = nfs4_run_open_task(data, 0);
1541 	if (status != 0 || !data->rpc_done)
1542 		return status;
1543 
1544 	if (o_arg->open_flags & O_CREAT) {
1545 		update_changeattr(dir, &o_res->cinfo);
1546 		nfs_post_op_update_inode(dir, o_res->dir_attr);
1547 	} else
1548 		nfs_refresh_inode(dir, o_res->dir_attr);
1549 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1550 		server->caps &= ~NFS_CAP_POSIX_LOCK;
1551 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1552 		status = _nfs4_proc_open_confirm(data);
1553 		if (status != 0)
1554 			return status;
1555 	}
1556 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1557 		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1558 	return 0;
1559 }
1560 
1561 static int nfs4_recover_expired_lease(struct nfs_server *server)
1562 {
1563 	struct nfs_client *clp = server->nfs_client;
1564 	unsigned int loop;
1565 	int ret;
1566 
1567 	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1568 		ret = nfs4_wait_clnt_recover(clp);
1569 		if (ret != 0)
1570 			break;
1571 		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1572 	    !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
1573 			break;
1574 		nfs4_schedule_state_recovery(clp);
1575 		ret = -EIO;
1576 	}
1577 	return ret;
1578 }
1579 
1580 /*
1581  * OPEN_EXPIRED:
1582  * 	reclaim state on the server after a network partition.
1583  * 	Assumes caller holds the appropriate lock
1584  */
1585 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1586 {
1587 	struct nfs4_opendata *opendata;
1588 	int ret;
1589 
1590 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1591 	if (IS_ERR(opendata))
1592 		return PTR_ERR(opendata);
1593 	ret = nfs4_open_recover(opendata, state);
1594 	if (ret == -ESTALE)
1595 		d_drop(ctx->path.dentry);
1596 	nfs4_opendata_put(opendata);
1597 	return ret;
1598 }
1599 
1600 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1601 {
1602 	struct nfs_server *server = NFS_SERVER(state->inode);
1603 	struct nfs4_exception exception = { };
1604 	int err;
1605 
1606 	do {
1607 		err = _nfs4_open_expired(ctx, state);
1608 		switch (err) {
1609 		default:
1610 			goto out;
1611 		case -NFS4ERR_GRACE:
1612 		case -NFS4ERR_DELAY:
1613 			nfs4_handle_exception(server, err, &exception);
1614 			err = 0;
1615 		}
1616 	} while (exception.retry);
1617 out:
1618 	return err;
1619 }
1620 
1621 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1622 {
1623 	struct nfs_open_context *ctx;
1624 	int ret;
1625 
1626 	ctx = nfs4_state_find_open_context(state);
1627 	if (IS_ERR(ctx))
1628 		return PTR_ERR(ctx);
1629 	ret = nfs4_do_open_expired(ctx, state);
1630 	put_nfs_open_context(ctx);
1631 	return ret;
1632 }
1633 
1634 /*
1635  * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
1636  * fields corresponding to the attributes that were used to store the verifier.
1637  * Make sure we clobber those fields in the later setattr call.
1638  */
1639 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1640 {
1641 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1642 	    !(sattr->ia_valid & ATTR_ATIME_SET))
1643 		sattr->ia_valid |= ATTR_ATIME;
1644 
1645 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1646 	    !(sattr->ia_valid & ATTR_MTIME_SET))
1647 		sattr->ia_valid |= ATTR_MTIME;
1648 }
1649 
1650 /*
1651  * Returns a referenced nfs4_state
1652  */
1653 static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
1654 {
1655 	struct nfs4_state_owner  *sp;
1656 	struct nfs4_state     *state = NULL;
1657 	struct nfs_server       *server = NFS_SERVER(dir);
1658 	struct nfs4_opendata *opendata;
1659 	int status;
1660 
1661 	/* Protect against reboot recovery conflicts */
1662 	status = -ENOMEM;
1663 	if (!(sp = nfs4_get_state_owner(server, cred))) {
1664 		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1665 		goto out_err;
1666 	}
1667 	status = nfs4_recover_expired_lease(server);
1668 	if (status != 0)
1669 		goto err_put_state_owner;
1670 	if (path->dentry->d_inode != NULL)
1671 		nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
1672 	status = -ENOMEM;
1673 	opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL);
1674 	if (opendata == NULL)
1675 		goto err_put_state_owner;
1676 
1677 	if (path->dentry->d_inode != NULL)
1678 		opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp);
1679 
1680 	status = _nfs4_proc_open(opendata);
1681 	if (status != 0)
1682 		goto err_opendata_put;
1683 
1684 	state = nfs4_opendata_to_nfs4_state(opendata);
1685 	status = PTR_ERR(state);
1686 	if (IS_ERR(state))
1687 		goto err_opendata_put;
1688 	if (server->caps & NFS_CAP_POSIX_LOCK)
1689 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1690 
1691 	if (opendata->o_arg.open_flags & O_EXCL) {
1692 		nfs4_exclusive_attrset(opendata, sattr);
1693 
1694 		nfs_fattr_init(opendata->o_res.f_attr);
1695 		status = nfs4_do_setattr(state->inode, cred,
1696 				opendata->o_res.f_attr, sattr,
1697 				state);
1698 		if (status == 0)
1699 			nfs_setattr_update_inode(state->inode, sattr);
1700 		nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1701 	}
1702 	nfs4_opendata_put(opendata);
1703 	nfs4_put_state_owner(sp);
1704 	*res = state;
1705 	return 0;
1706 err_opendata_put:
1707 	nfs4_opendata_put(opendata);
1708 err_put_state_owner:
1709 	nfs4_put_state_owner(sp);
1710 out_err:
1711 	*res = NULL;
1712 	return status;
1713 }
1714 
1715 
1716 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
1717 {
1718 	struct nfs4_exception exception = { };
1719 	struct nfs4_state *res;
1720 	int status;
1721 
1722 	do {
1723 		status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res);
1724 		if (status == 0)
1725 			break;
1726 		/* NOTE: BAD_SEQID means the server and client disagree about the
1727 		 * book-keeping w.r.t. state-changing operations
1728 		 * (OPEN/CLOSE/LOCK/LOCKU...)
1729 		 * It is actually a sign of a bug on the client or on the server.
1730 		 *
1731 		 * If we receive a BAD_SEQID error in the particular case of
1732 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1733 		 * have unhashed the old state_owner for us, and that we can
1734 		 * therefore safely retry using a new one. We should still warn
1735 		 * the user though...
1736 		 */
1737 		if (status == -NFS4ERR_BAD_SEQID) {
1738 			printk(KERN_WARNING "NFS: v4 server %s"
1739 					" returned a bad sequence-id error!\n",
1740 					NFS_SERVER(dir)->nfs_client->cl_hostname);
1741 			exception.retry = 1;
1742 			continue;
1743 		}
1744 		/*
1745 		 * BAD_STATEID on OPEN means that the server cancelled our
1746 		 * state before it received the OPEN_CONFIRM.
1747 		 * Recover by retrying the request as per the discussion
1748 		 * on Page 181 of RFC3530.
1749 		 */
1750 		if (status == -NFS4ERR_BAD_STATEID) {
1751 			exception.retry = 1;
1752 			continue;
1753 		}
1754 		if (status == -EAGAIN) {
1755 			/* We must have found a delegation */
1756 			exception.retry = 1;
1757 			continue;
1758 		}
1759 		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1760 					status, &exception));
1761 	} while (exception.retry);
1762 	return res;
1763 }
1764 
1765 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1766 			    struct nfs_fattr *fattr, struct iattr *sattr,
1767 			    struct nfs4_state *state)
1768 {
1769 	struct nfs_server *server = NFS_SERVER(inode);
1770 	struct nfs_setattrargs arg = {
1771 		.fh		= NFS_FH(inode),
1772 		.iap		= sattr,
1773 		.server		= server,
1774 		.bitmask	= server->attr_bitmask,
1775 	};
1776 	struct nfs_setattrres res = {
1777 		.fattr		= fattr,
1778 		.server		= server,
1779 	};
1780 	struct rpc_message msg = {
1781 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1782 		.rpc_argp	= &arg,
1783 		.rpc_resp	= &res,
1784 		.rpc_cred	= cred,
1785 	};
1786 	unsigned long timestamp = jiffies;
1787 	int status;
1788 
1789 	nfs_fattr_init(fattr);
1790 
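	/* Prefer a delegation stateid if we hold one, otherwise a stateid
	 * derived from the open state, and fall back to the all-zeroes
	 * stateid. */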
1791 	if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
1792 		/* Use that stateid */
1793 	} else if (state != NULL) {
1794 		nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
1795 	} else
1796 		memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
1797 
1798 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
1799 	if (status == 0 && state != NULL)
1800 		renew_lease(server, timestamp);
1801 	return status;
1802 }
1803 
1804 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1805 			   struct nfs_fattr *fattr, struct iattr *sattr,
1806 			   struct nfs4_state *state)
1807 {
1808 	struct nfs_server *server = NFS_SERVER(inode);
1809 	struct nfs4_exception exception = { };
1810 	int err;
1811 	do {
1812 		err = nfs4_handle_exception(server,
1813 				_nfs4_do_setattr(inode, cred, fattr, sattr, state),
1814 				&exception);
1815 	} while (exception.retry);
1816 	return err;
1817 }
1818 
1819 struct nfs4_closedata {
1820 	struct path path;
1821 	struct inode *inode;
1822 	struct nfs4_state *state;
1823 	struct nfs_closeargs arg;
1824 	struct nfs_closeres res;
1825 	struct nfs_fattr fattr;
1826 	unsigned long timestamp;
1827 };
1828 
1829 static void nfs4_free_closedata(void *data)
1830 {
1831 	struct nfs4_closedata *calldata = data;
1832 	struct nfs4_state_owner *sp = calldata->state->owner;
1833 
1834 	nfs4_put_open_state(calldata->state);
1835 	nfs_free_seqid(calldata->arg.seqid);
1836 	nfs4_put_state_owner(sp);
1837 	path_put(&calldata->path);
1838 	kfree(calldata);
1839 }
1840 
1841 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
1842 		fmode_t fmode)
1843 {
1844 	spin_lock(&state->owner->so_lock);
1845 	if (!(fmode & FMODE_READ))
1846 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1847 	if (!(fmode & FMODE_WRITE))
1848 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1849 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1850 	spin_unlock(&state->owner->so_lock);
1851 }
1852 
1853 static void nfs4_close_done(struct rpc_task *task, void *data)
1854 {
1855 	struct nfs4_closedata *calldata = data;
1856 	struct nfs4_state *state = calldata->state;
1857 	struct nfs_server *server = NFS_SERVER(calldata->inode);
1858 
1859 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
1860 		return;
1861 	/* We are done with the inode, and in the process of freeing
1862 	 * the state_owner.  Keep this around to process errors.
1863 	 */
1864 	switch (task->tk_status) {
1865 		case 0:
1866 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
1867 			renew_lease(server, calldata->timestamp);
1868 			nfs4_close_clear_stateid_flags(state,
1869 					calldata->arg.fmode);
1870 			break;
1871 		case -NFS4ERR_STALE_STATEID:
1872 		case -NFS4ERR_OLD_STATEID:
1873 		case -NFS4ERR_BAD_STATEID:
1874 		case -NFS4ERR_EXPIRED:
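			/* The server no longer recognizes this stateid.  If we
			 * were tearing all state down anyway (fmode == 0) there
			 * is nothing more to do; otherwise fall through to the
			 * generic error handler, which may restart the call. */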
1875 			if (calldata->arg.fmode == 0)
1876 				break;
1877 		default:
1878 			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
1879 				rpc_restart_call_prepare(task);
1880 	}
1881 	nfs_release_seqid(calldata->arg.seqid);
1882 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
1883 }
1884 
1885 static void nfs4_close_prepare(struct rpc_task *task, void *data)
1886 {
1887 	struct nfs4_closedata *calldata = data;
1888 	struct nfs4_state *state = calldata->state;
1889 	int call_close = 0;
1890 
1891 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
1892 		return;
1893 
1894 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1895 	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
1896 	spin_lock(&state->owner->so_lock);
1897 	/* Calculate the change in open mode */
1898 	if (state->n_rdwr == 0) {
1899 		if (state->n_rdonly == 0) {
1900 			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
1901 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
1902 			calldata->arg.fmode &= ~FMODE_READ;
1903 		}
1904 		if (state->n_wronly == 0) {
1905 			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
1906 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
1907 			calldata->arg.fmode &= ~FMODE_WRITE;
1908 		}
1909 	}
1910 	spin_unlock(&state->owner->so_lock);
1911 
1912 	if (!call_close) {
1913 		/* Note: exit _without_ calling nfs4_close_done */
1914 		task->tk_action = NULL;
1915 		return;
1916 	}
1917 
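	/* No open mode remains at all: send a full CLOSE rather than the
	 * OPEN_DOWNGRADE set up above. */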
1918 	if (calldata->arg.fmode == 0)
1919 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
1920 
1921 	nfs_fattr_init(calldata->res.fattr);
1922 	calldata->timestamp = jiffies;
1923 	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
1924 				&calldata->arg.seq_args, &calldata->res.seq_res,
1925 				1, task))
1926 		return;
1927 	rpc_call_start(task);
1928 }
1929 
1930 static const struct rpc_call_ops nfs4_close_ops = {
1931 	.rpc_call_prepare = nfs4_close_prepare,
1932 	.rpc_call_done = nfs4_close_done,
1933 	.rpc_release = nfs4_free_closedata,
1934 };
1935 
1936 /*
1937  * It is possible for data to be read/written from a memory-mapped file
1938  * after the sys_close call (which hits the VFS layer as a flush).
1939  * This means that we can't safely call NFSv4 CLOSE on a file until
1940  * the inode is cleared. This in turn means that we are not good
1941  * NFSv4 citizens - we do not tell the server to update the file's
1942  * share state even when we are done with one of the three share
1943  * stateids in the inode.
1944  *
1945  * NOTE: Caller must be holding the sp->so_owner semaphore!
1946  */
1947 int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
1948 {
1949 	struct nfs_server *server = NFS_SERVER(state->inode);
1950 	struct nfs4_closedata *calldata;
1951 	struct nfs4_state_owner *sp = state->owner;
1952 	struct rpc_task *task;
1953 	struct rpc_message msg = {
1954 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
1955 		.rpc_cred = state->owner->so_cred,
1956 	};
1957 	struct rpc_task_setup task_setup_data = {
1958 		.rpc_client = server->client,
1959 		.rpc_message = &msg,
1960 		.callback_ops = &nfs4_close_ops,
1961 		.workqueue = nfsiod_workqueue,
1962 		.flags = RPC_TASK_ASYNC,
1963 	};
1964 	int status = -ENOMEM;
1965 
1966 	calldata = kzalloc(sizeof(*calldata), gfp_mask);
1967 	if (calldata == NULL)
1968 		goto out;
1969 	calldata->inode = state->inode;
1970 	calldata->state = state;
1971 	calldata->arg.fh = NFS_FH(state->inode);
1972 	calldata->arg.stateid = &state->open_stateid;
1973 	/* Serialization for the sequence id */
1974 	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
1975 	if (calldata->arg.seqid == NULL)
1976 		goto out_free_calldata;
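	/* Assume a full CLOSE for now; nfs4_close_prepare() recomputes the
	 * share mode actually remaining at transmit time. */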
1977 	calldata->arg.fmode = 0;
1978 	calldata->arg.bitmask = server->cache_consistency_bitmask;
1979 	calldata->res.fattr = &calldata->fattr;
1980 	calldata->res.seqid = calldata->arg.seqid;
1981 	calldata->res.server = server;
1982 	path_get(path);
1983 	calldata->path = *path;
1984 
1985 	msg.rpc_argp = &calldata->arg;
1986 	msg.rpc_resp = &calldata->res;
1987 	task_setup_data.callback_data = calldata;
1988 	task = rpc_run_task(&task_setup_data);
1989 	if (IS_ERR(task))
1990 		return PTR_ERR(task);
1991 	status = 0;
1992 	if (wait)
1993 		status = rpc_wait_for_completion_task(task);
1994 	rpc_put_task(task);
1995 	return status;
1996 out_free_calldata:
1997 	kfree(calldata);
1998 out:
1999 	nfs4_put_open_state(state);
2000 	nfs4_put_state_owner(sp);
2001 	return status;
2002 }
2003 
2004 static struct inode *
2005 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2006 {
2007 	struct nfs4_state *state;
2008 
2009 	/* Protect against concurrent sillydeletes */
2010 	state = nfs4_do_open(dir, &ctx->path, ctx->mode, open_flags, attr, ctx->cred);
2011 	if (IS_ERR(state))
2012 		return ERR_CAST(state);
2013 	ctx->state = state;
2014 	return igrab(state->inode);
2015 }
2016 
2017 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2018 {
2019 	if (ctx->state == NULL)
2020 		return;
2021 	if (is_sync)
2022 		nfs4_close_sync(&ctx->path, ctx->state, ctx->mode);
2023 	else
2024 		nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
2025 }
2026 
2027 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2028 {
2029 	struct nfs4_server_caps_arg args = {
2030 		.fhandle = fhandle,
2031 	};
2032 	struct nfs4_server_caps_res res = {};
2033 	struct rpc_message msg = {
2034 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2035 		.rpc_argp = &args,
2036 		.rpc_resp = &res,
2037 	};
2038 	int status;
2039 
2040 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
2041 	if (status == 0) {
2042 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2043 		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2044 				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2045 				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2046 				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2047 				NFS_CAP_CTIME|NFS_CAP_MTIME);
2048 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2049 			server->caps |= NFS_CAP_ACLS;
2050 		if (res.has_links != 0)
2051 			server->caps |= NFS_CAP_HARDLINKS;
2052 		if (res.has_symlinks != 0)
2053 			server->caps |= NFS_CAP_SYMLINKS;
2054 		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2055 			server->caps |= NFS_CAP_FILEID;
2056 		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2057 			server->caps |= NFS_CAP_MODE;
2058 		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2059 			server->caps |= NFS_CAP_NLINK;
2060 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2061 			server->caps |= NFS_CAP_OWNER;
2062 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2063 			server->caps |= NFS_CAP_OWNER_GROUP;
2064 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2065 			server->caps |= NFS_CAP_ATIME;
2066 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2067 			server->caps |= NFS_CAP_CTIME;
2068 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2069 			server->caps |= NFS_CAP_MTIME;
2070 
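		/* The cache consistency bitmask is the subset of supported
		 * attributes we request when we only need to revalidate
		 * cached metadata after an operation. */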
2071 		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2072 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2073 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2074 		server->acl_bitmask = res.acl_bitmask;
2075 	}
2076 
2077 	return status;
2078 }
2079 
2080 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2081 {
2082 	struct nfs4_exception exception = { };
2083 	int err;
2084 	do {
2085 		err = nfs4_handle_exception(server,
2086 				_nfs4_server_capabilities(server, fhandle),
2087 				&exception);
2088 	} while (exception.retry);
2089 	return err;
2090 }
2091 
2092 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2093 		struct nfs_fsinfo *info)
2094 {
2095 	struct nfs4_lookup_root_arg args = {
2096 		.bitmask = nfs4_fattr_bitmap,
2097 	};
2098 	struct nfs4_lookup_res res = {
2099 		.server = server,
2100 		.fattr = info->fattr,
2101 		.fh = fhandle,
2102 	};
2103 	struct rpc_message msg = {
2104 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2105 		.rpc_argp = &args,
2106 		.rpc_resp = &res,
2107 	};
2108 
2109 	nfs_fattr_init(info->fattr);
2110 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2111 }
2112 
2113 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2114 		struct nfs_fsinfo *info)
2115 {
2116 	struct nfs4_exception exception = { };
2117 	int err;
2118 	do {
2119 		err = nfs4_handle_exception(server,
2120 				_nfs4_lookup_root(server, fhandle, info),
2121 				&exception);
2122 	} while (exception.retry);
2123 	return err;
2124 }
2125 
2126 /*
2127  * get the file handle for the "/" directory on the server
2128  */
2129 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2130 			      struct nfs_fsinfo *info)
2131 {
2132 	int status;
2133 
2134 	status = nfs4_lookup_root(server, fhandle, info);
2135 	if (status == 0)
2136 		status = nfs4_server_capabilities(server, fhandle);
2137 	if (status == 0)
2138 		status = nfs4_do_fsinfo(server, fhandle, info);
2139 	return nfs4_map_errors(status);
2140 }
2141 
2142 /*
2143  * Get locations and (maybe) other attributes of a referral.
2144  * Note that we'll actually follow the referral later when
2145  * we detect an fsid mismatch during inode revalidation.
2146  */
2147 static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
2148 {
2149 	int status = -ENOMEM;
2150 	struct page *page = NULL;
2151 	struct nfs4_fs_locations *locations = NULL;
2152 
2153 	page = alloc_page(GFP_KERNEL);
2154 	if (page == NULL)
2155 		goto out;
2156 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2157 	if (locations == NULL)
2158 		goto out;
2159 
2160 	status = nfs4_proc_fs_locations(dir, name, locations, page);
2161 	if (status != 0)
2162 		goto out;
2163 	/* Make sure server returned a different fsid for the referral */
2164 	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2165 		dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
2166 		status = -EIO;
2167 		goto out;
2168 	}
2169 
2170 	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2171 	fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
2172 	if (!fattr->mode)
2173 		fattr->mode = S_IFDIR;
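	/* The referral point has no usable filehandle of its own; hand back
	 * a zeroed one to mark it as a stub. */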
2174 	memset(fhandle, 0, sizeof(struct nfs_fh));
2175 out:
2176 	if (page)
2177 		__free_page(page);
2178 	kfree(locations);
2179 	return status;
2180 }
2181 
2182 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2183 {
2184 	struct nfs4_getattr_arg args = {
2185 		.fh = fhandle,
2186 		.bitmask = server->attr_bitmask,
2187 	};
2188 	struct nfs4_getattr_res res = {
2189 		.fattr = fattr,
2190 		.server = server,
2191 	};
2192 	struct rpc_message msg = {
2193 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2194 		.rpc_argp = &args,
2195 		.rpc_resp = &res,
2196 	};
2197 
2198 	nfs_fattr_init(fattr);
2199 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2200 }
2201 
2202 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2203 {
2204 	struct nfs4_exception exception = { };
2205 	int err;
2206 	do {
2207 		err = nfs4_handle_exception(server,
2208 				_nfs4_proc_getattr(server, fhandle, fattr),
2209 				&exception);
2210 	} while (exception.retry);
2211 	return err;
2212 }
2213 
2214 /*
2215  * The file is not closed if it is opened due to a request to change
2216  * the size of the file. The open call will not be needed once the
2217  * VFS layer lookup-intents are implemented.
2218  *
2219  * Close is called when the inode is destroyed.
2220  * If we haven't opened the file for O_WRONLY, we
2221  * need to in the size_change case to obtain a stateid.
2222  *
2223  * Got race?
2224  * Because OPEN is always done by name in NFSv4, it is
2225  * possible that we opened a different file by the same
2226  * name.  We can recognize this race condition, but we
2227  * can't do anything about it besides returning an error.
2228  *
2229  * This will be fixed with VFS changes (lookup-intent).
2230  */
2231 static int
2232 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2233 		  struct iattr *sattr)
2234 {
2235 	struct inode *inode = dentry->d_inode;
2236 	struct rpc_cred *cred = NULL;
2237 	struct nfs4_state *state = NULL;
2238 	int status;
2239 
2240 	nfs_fattr_init(fattr);
2241 
2242 	/* If the setattr comes via an open file descriptor, reuse that open context's cred and state */
2243 	if (sattr->ia_valid & ATTR_FILE) {
2244 		struct nfs_open_context *ctx;
2245 
2246 		ctx = nfs_file_open_context(sattr->ia_file);
2247 		if (ctx) {
2248 			cred = ctx->cred;
2249 			state = ctx->state;
2250 		}
2251 	}
2252 
2253 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2254 	if (status == 0)
2255 		nfs_setattr_update_inode(inode, sattr);
2256 	return status;
2257 }
2258 
2259 static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *dirfh,
2260 		const struct qstr *name, struct nfs_fh *fhandle,
2261 		struct nfs_fattr *fattr)
2262 {
2263 	int		       status;
2264 	struct nfs4_lookup_arg args = {
2265 		.bitmask = server->attr_bitmask,
2266 		.dir_fh = dirfh,
2267 		.name = name,
2268 	};
2269 	struct nfs4_lookup_res res = {
2270 		.server = server,
2271 		.fattr = fattr,
2272 		.fh = fhandle,
2273 	};
2274 	struct rpc_message msg = {
2275 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2276 		.rpc_argp = &args,
2277 		.rpc_resp = &res,
2278 	};
2279 
2280 	nfs_fattr_init(fattr);
2281 
2282 	dprintk("NFS call  lookupfh %s\n", name->name);
2283 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
2284 	dprintk("NFS reply lookupfh: %d\n", status);
2285 	return status;
2286 }
2287 
2288 static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
2289 			      struct qstr *name, struct nfs_fh *fhandle,
2290 			      struct nfs_fattr *fattr)
2291 {
2292 	struct nfs4_exception exception = { };
2293 	int err;
2294 	do {
2295 		err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
2296 		/* FIXME: referrals are not handled on this path yet */
2297 		if (err == -NFS4ERR_MOVED) {
2298 			err = -EREMOTE;
2299 			break;
2300 		}
2301 		err = nfs4_handle_exception(server, err, &exception);
2302 	} while (exception.retry);
2303 	return err;
2304 }
2305 
2306 static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
2307 		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2308 {
2309 	int status;
2310 
2311 	dprintk("NFS call  lookup %s\n", name->name);
2312 	status = _nfs4_proc_lookupfh(NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
2313 	if (status == -NFS4ERR_MOVED)
2314 		status = nfs4_get_referral(dir, name, fattr, fhandle);
2315 	dprintk("NFS reply lookup: %d\n", status);
2316 	return status;
2317 }
2318 
2319 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2320 {
2321 	struct nfs4_exception exception = { };
2322 	int err;
2323 	do {
2324 		err = nfs4_handle_exception(NFS_SERVER(dir),
2325 				_nfs4_proc_lookup(dir, name, fhandle, fattr),
2326 				&exception);
2327 	} while (exception.retry);
2328 	return err;
2329 }
2330 
2331 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2332 {
2333 	struct nfs_server *server = NFS_SERVER(inode);
2334 	struct nfs4_accessargs args = {
2335 		.fh = NFS_FH(inode),
2336 		.bitmask = server->attr_bitmask,
2337 	};
2338 	struct nfs4_accessres res = {
2339 		.server = server,
2340 	};
2341 	struct rpc_message msg = {
2342 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2343 		.rpc_argp = &args,
2344 		.rpc_resp = &res,
2345 		.rpc_cred = entry->cred,
2346 	};
2347 	int mode = entry->mask;
2348 	int status;
2349 
2350 	/*
2351 	 * Determine which access bits we want to ask for...
2352 	 */
2353 	if (mode & MAY_READ)
2354 		args.access |= NFS4_ACCESS_READ;
2355 	if (S_ISDIR(inode->i_mode)) {
2356 		if (mode & MAY_WRITE)
2357 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2358 		if (mode & MAY_EXEC)
2359 			args.access |= NFS4_ACCESS_LOOKUP;
2360 	} else {
2361 		if (mode & MAY_WRITE)
2362 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2363 		if (mode & MAY_EXEC)
2364 			args.access |= NFS4_ACCESS_EXECUTE;
2365 	}
2366 
2367 	res.fattr = nfs_alloc_fattr();
2368 	if (res.fattr == NULL)
2369 		return -ENOMEM;
2370 
2371 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
2372 	if (!status) {
2373 		entry->mask = 0;
2374 		if (res.access & NFS4_ACCESS_READ)
2375 			entry->mask |= MAY_READ;
2376 		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2377 			entry->mask |= MAY_WRITE;
2378 		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2379 			entry->mask |= MAY_EXEC;
2380 		nfs_refresh_inode(inode, res.fattr);
2381 	}
2382 	nfs_free_fattr(res.fattr);
2383 	return status;
2384 }
2385 
2386 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2387 {
2388 	struct nfs4_exception exception = { };
2389 	int err;
2390 	do {
2391 		err = nfs4_handle_exception(NFS_SERVER(inode),
2392 				_nfs4_proc_access(inode, entry),
2393 				&exception);
2394 	} while (exception.retry);
2395 	return err;
2396 }
2397 
2398 /*
2399  * TODO: For the time being, we don't try to get any attributes
2400  * along with any of the zero-copy operations READ, READDIR,
2401  * READLINK, WRITE.
2402  *
2403  * In the case of the first three, we want to put the GETATTR
2404  * after the read-type operation -- this is because it is hard
2405  * to predict the length of a GETATTR response in v4, and thus
2406  * hard to align the READ data correctly.  This means that the GETATTR
2407  * may end up partially falling into the page cache, and we should
2408  * shift it into the 'tail' of the xdr_buf before processing.
2409  * To do this efficiently, we need to know the total length
2410  * of data received, which doesn't seem to be available outside
2411  * of the RPC layer.
2412  *
2413  * In the case of WRITE, we also want to put the GETATTR after
2414  * the operation -- in this case because we want to make sure
2415  * we get the post-operation mtime and size.  This means that
2416  * we can't use xdr_encode_pages() as written: we need a variant
2417  * of it which would leave room in the 'tail' iovec.
2418  *
2419  * Both of these changes to the XDR layer would in fact be quite
2420  * minor, but I decided to leave them for a subsequent patch.
2421  */
2422 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2423 		unsigned int pgbase, unsigned int pglen)
2424 {
2425 	struct nfs4_readlink args = {
2426 		.fh       = NFS_FH(inode),
2427 		.pgbase	  = pgbase,
2428 		.pglen    = pglen,
2429 		.pages    = &page,
2430 	};
2431 	struct nfs4_readlink_res res;
2432 	struct rpc_message msg = {
2433 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2434 		.rpc_argp = &args,
2435 		.rpc_resp = &res,
2436 	};
2437 
2438 	return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
2439 }
2440 
2441 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2442 		unsigned int pgbase, unsigned int pglen)
2443 {
2444 	struct nfs4_exception exception = { };
2445 	int err;
2446 	do {
2447 		err = nfs4_handle_exception(NFS_SERVER(inode),
2448 				_nfs4_proc_readlink(inode, page, pgbase, pglen),
2449 				&exception);
2450 	} while (exception.retry);
2451 	return err;
2452 }
2453 
2454 /*
2455  * Got race?
2456  * We will need to arrange for the VFS layer to provide an atomic open.
2457  * Until then, this create/open method is prone to inefficiency and race
2458  * conditions due to the lookup, create, and open VFS calls from sys_open()
2459  * placed on the wire.
2460  *
2461  * Given the above sorry state of affairs, I'm simply sending an OPEN.
2462  * The file will be opened again in the subsequent VFS open call
2463  * (nfs4_proc_file_open).
2464  *
2465  * The open for read will just hang around to be used by any process that
2466  * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2467  */
2468 
2469 static int
2470 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2471                  int flags, struct nfs_open_context *ctx)
2472 {
2473 	struct path my_path = {
2474 		.dentry = dentry,
2475 	};
2476 	struct path *path = &my_path;
2477 	struct nfs4_state *state;
2478 	struct rpc_cred *cred = NULL;
2479 	fmode_t fmode = 0;
2480 	int status = 0;
2481 
2482 	if (ctx != NULL) {
2483 		cred = ctx->cred;
2484 		path = &ctx->path;
2485 		fmode = ctx->mode;
2486 	}
2487 	state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
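	/* The OPEN may have hit a file other than the one this dentry was
	 * cached for, so drop the dentry and (on success) re-instantiate it
	 * with the inode the server actually returned. */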
2488 	d_drop(dentry);
2489 	if (IS_ERR(state)) {
2490 		status = PTR_ERR(state);
2491 		goto out;
2492 	}
2493 	d_add(dentry, igrab(state->inode));
2494 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2495 	if (ctx != NULL)
2496 		ctx->state = state;
2497 	else
2498 		nfs4_close_sync(path, state, fmode);
2499 out:
2500 	return status;
2501 }
2502 
2503 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2504 {
2505 	struct nfs_server *server = NFS_SERVER(dir);
2506 	struct nfs_removeargs args = {
2507 		.fh = NFS_FH(dir),
2508 		.name.len = name->len,
2509 		.name.name = name->name,
2510 		.bitmask = server->attr_bitmask,
2511 	};
2512 	struct nfs_removeres res = {
2513 		.server = server,
2514 	};
2515 	struct rpc_message msg = {
2516 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2517 		.rpc_argp = &args,
2518 		.rpc_resp = &res,
2519 	};
2520 	int status = -ENOMEM;
2521 
2522 	res.dir_attr = nfs_alloc_fattr();
2523 	if (res.dir_attr == NULL)
2524 		goto out;
2525 
2526 	status = nfs4_call_sync(server, &msg, &args, &res, 1);
2527 	if (status == 0) {
2528 		update_changeattr(dir, &res.cinfo);
2529 		nfs_post_op_update_inode(dir, res.dir_attr);
2530 	}
2531 	nfs_free_fattr(res.dir_attr);
2532 out:
2533 	return status;
2534 }
2535 
2536 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2537 {
2538 	struct nfs4_exception exception = { };
2539 	int err;
2540 	do {
2541 		err = nfs4_handle_exception(NFS_SERVER(dir),
2542 				_nfs4_proc_remove(dir, name),
2543 				&exception);
2544 	} while (exception.retry);
2545 	return err;
2546 }
2547 
2548 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2549 {
2550 	struct nfs_server *server = NFS_SERVER(dir);
2551 	struct nfs_removeargs *args = msg->rpc_argp;
2552 	struct nfs_removeres *res = msg->rpc_resp;
2553 
2554 	args->bitmask = server->cache_consistency_bitmask;
2555 	res->server = server;
2556 	res->seq_res.sr_slot = NULL;
2557 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2558 }
2559 
2560 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2561 {
2562 	struct nfs_removeres *res = task->tk_msg.rpc_resp;
2563 
2564 	if (!nfs4_sequence_done(task, &res->seq_res))
2565 		return 0;
2566 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2567 		return 0;
2568 	update_changeattr(dir, &res->cinfo);
2569 	nfs_post_op_update_inode(dir, res->dir_attr);
2570 	return 1;
2571 }
2572 
2573 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2574 {
2575 	struct nfs_server *server = NFS_SERVER(dir);
2576 	struct nfs_renameargs *arg = msg->rpc_argp;
2577 	struct nfs_renameres *res = msg->rpc_resp;
2578 
2579 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2580 	arg->bitmask = server->attr_bitmask;
2581 	res->server = server;
2582 }
2583 
2584 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2585 				 struct inode *new_dir)
2586 {
2587 	struct nfs_renameres *res = task->tk_msg.rpc_resp;
2588 
2589 	if (!nfs4_sequence_done(task, &res->seq_res))
2590 		return 0;
2591 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2592 		return 0;
2593 
2594 	update_changeattr(old_dir, &res->old_cinfo);
2595 	nfs_post_op_update_inode(old_dir, res->old_fattr);
2596 	update_changeattr(new_dir, &res->new_cinfo);
2597 	nfs_post_op_update_inode(new_dir, res->new_fattr);
2598 	return 1;
2599 }
2600 
2601 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2602 		struct inode *new_dir, struct qstr *new_name)
2603 {
2604 	struct nfs_server *server = NFS_SERVER(old_dir);
2605 	struct nfs_renameargs arg = {
2606 		.old_dir = NFS_FH(old_dir),
2607 		.new_dir = NFS_FH(new_dir),
2608 		.old_name = old_name,
2609 		.new_name = new_name,
2610 		.bitmask = server->attr_bitmask,
2611 	};
2612 	struct nfs_renameres res = {
2613 		.server = server,
2614 	};
2615 	struct rpc_message msg = {
2616 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2617 		.rpc_argp = &arg,
2618 		.rpc_resp = &res,
2619 	};
2620 	int status = -ENOMEM;
2621 
2622 	res.old_fattr = nfs_alloc_fattr();
2623 	res.new_fattr = nfs_alloc_fattr();
2624 	if (res.old_fattr == NULL || res.new_fattr == NULL)
2625 		goto out;
2626 
2627 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
2628 	if (!status) {
2629 		update_changeattr(old_dir, &res.old_cinfo);
2630 		nfs_post_op_update_inode(old_dir, res.old_fattr);
2631 		update_changeattr(new_dir, &res.new_cinfo);
2632 		nfs_post_op_update_inode(new_dir, res.new_fattr);
2633 	}
2634 out:
2635 	nfs_free_fattr(res.new_fattr);
2636 	nfs_free_fattr(res.old_fattr);
2637 	return status;
2638 }
2639 
2640 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2641 		struct inode *new_dir, struct qstr *new_name)
2642 {
2643 	struct nfs4_exception exception = { };
2644 	int err;
2645 	do {
2646 		err = nfs4_handle_exception(NFS_SERVER(old_dir),
2647 				_nfs4_proc_rename(old_dir, old_name,
2648 					new_dir, new_name),
2649 				&exception);
2650 	} while (exception.retry);
2651 	return err;
2652 }
2653 
2654 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2655 {
2656 	struct nfs_server *server = NFS_SERVER(inode);
2657 	struct nfs4_link_arg arg = {
2658 		.fh     = NFS_FH(inode),
2659 		.dir_fh = NFS_FH(dir),
2660 		.name   = name,
2661 		.bitmask = server->attr_bitmask,
2662 	};
2663 	struct nfs4_link_res res = {
2664 		.server = server,
2665 	};
2666 	struct rpc_message msg = {
2667 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2668 		.rpc_argp = &arg,
2669 		.rpc_resp = &res,
2670 	};
2671 	int status = -ENOMEM;
2672 
2673 	res.fattr = nfs_alloc_fattr();
2674 	res.dir_attr = nfs_alloc_fattr();
2675 	if (res.fattr == NULL || res.dir_attr == NULL)
2676 		goto out;
2677 
2678 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
2679 	if (!status) {
2680 		update_changeattr(dir, &res.cinfo);
2681 		nfs_post_op_update_inode(dir, res.dir_attr);
2682 		nfs_post_op_update_inode(inode, res.fattr);
2683 	}
2684 out:
2685 	nfs_free_fattr(res.dir_attr);
2686 	nfs_free_fattr(res.fattr);
2687 	return status;
2688 }
2689 
2690 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2691 {
2692 	struct nfs4_exception exception = { };
2693 	int err;
2694 	do {
2695 		err = nfs4_handle_exception(NFS_SERVER(inode),
2696 				_nfs4_proc_link(inode, dir, name),
2697 				&exception);
2698 	} while (exception.retry);
2699 	return err;
2700 }
2701 
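/* Argument/result bundle shared by the CREATE-based operations
 * (mkdir, symlink, mknod). */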
2702 struct nfs4_createdata {
2703 	struct rpc_message msg;
2704 	struct nfs4_create_arg arg;
2705 	struct nfs4_create_res res;
2706 	struct nfs_fh fh;
2707 	struct nfs_fattr fattr;
2708 	struct nfs_fattr dir_fattr;
2709 };
2710 
2711 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
2712 		struct qstr *name, struct iattr *sattr, u32 ftype)
2713 {
2714 	struct nfs4_createdata *data;
2715 
2716 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2717 	if (data != NULL) {
2718 		struct nfs_server *server = NFS_SERVER(dir);
2719 
2720 		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
2721 		data->msg.rpc_argp = &data->arg;
2722 		data->msg.rpc_resp = &data->res;
2723 		data->arg.dir_fh = NFS_FH(dir);
2724 		data->arg.server = server;
2725 		data->arg.name = name;
2726 		data->arg.attrs = sattr;
2727 		data->arg.ftype = ftype;
2728 		data->arg.bitmask = server->attr_bitmask;
2729 		data->res.server = server;
2730 		data->res.fh = &data->fh;
2731 		data->res.fattr = &data->fattr;
2732 		data->res.dir_fattr = &data->dir_fattr;
2733 		nfs_fattr_init(data->res.fattr);
2734 		nfs_fattr_init(data->res.dir_fattr);
2735 	}
2736 	return data;
2737 }
2738 
2739 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
2740 {
2741 	int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
2742 				    &data->arg, &data->res, 1);
2743 	if (status == 0) {
2744 		update_changeattr(dir, &data->res.dir_cinfo);
2745 		nfs_post_op_update_inode(dir, data->res.dir_fattr);
2746 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
2747 	}
2748 	return status;
2749 }
2750 
2751 static void nfs4_free_createdata(struct nfs4_createdata *data)
2752 {
2753 	kfree(data);
2754 }
2755 
2756 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2757 		struct page *page, unsigned int len, struct iattr *sattr)
2758 {
2759 	struct nfs4_createdata *data;
2760 	int status = -ENAMETOOLONG;
2761 
2762 	if (len > NFS4_MAXPATHLEN)
2763 		goto out;
2764 
2765 	status = -ENOMEM;
2766 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
2767 	if (data == NULL)
2768 		goto out;
2769 
2770 	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
2771 	data->arg.u.symlink.pages = &page;
2772 	data->arg.u.symlink.len = len;
2773 
2774 	status = nfs4_do_create(dir, dentry, data);
2775 
2776 	nfs4_free_createdata(data);
2777 out:
2778 	return status;
2779 }
2780 
2781 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2782 		struct page *page, unsigned int len, struct iattr *sattr)
2783 {
2784 	struct nfs4_exception exception = { };
2785 	int err;
2786 	do {
2787 		err = nfs4_handle_exception(NFS_SERVER(dir),
2788 				_nfs4_proc_symlink(dir, dentry, page,
2789 							len, sattr),
2790 				&exception);
2791 	} while (exception.retry);
2792 	return err;
2793 }
2794 
2795 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2796 		struct iattr *sattr)
2797 {
2798 	struct nfs4_createdata *data;
2799 	int status = -ENOMEM;
2800 
2801 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
2802 	if (data == NULL)
2803 		goto out;
2804 
2805 	status = nfs4_do_create(dir, dentry, data);
2806 
2807 	nfs4_free_createdata(data);
2808 out:
2809 	return status;
2810 }
2811 
2812 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2813 		struct iattr *sattr)
2814 {
2815 	struct nfs4_exception exception = { };
2816 	int err;
2817 	do {
2818 		err = nfs4_handle_exception(NFS_SERVER(dir),
2819 				_nfs4_proc_mkdir(dir, dentry, sattr),
2820 				&exception);
2821 	} while (exception.retry);
2822 	return err;
2823 }
2824 
2825 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2826 		u64 cookie, struct page **pages, unsigned int count, int plus)
2827 {
2828 	struct inode		*dir = dentry->d_inode;
2829 	struct nfs4_readdir_arg args = {
2830 		.fh = NFS_FH(dir),
2831 		.pages = pages,
2832 		.pgbase = 0,
2833 		.count = count,
2834 		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
2835 	};
2836 	struct nfs4_readdir_res res;
2837 	struct rpc_message msg = {
2838 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
2839 		.rpc_argp = &args,
2840 		.rpc_resp = &res,
2841 		.rpc_cred = cred,
2842 	};
2843 	int			status;
2844 
2845 	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
2846 			dentry->d_parent->d_name.name,
2847 			dentry->d_name.name,
2848 			(unsigned long long)cookie);
2849 	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
2850 	res.pgbase = args.pgbase;
2851 	status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
2852 	if (status == 0)
2853 		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
2854 
2855 	nfs_invalidate_atime(dir);
2856 
2857 	dprintk("%s: returns %d\n", __func__, status);
2858 	return status;
2859 }
2860 
2861 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2862 		u64 cookie, struct page **pages, unsigned int count, int plus)
2863 {
2864 	struct nfs4_exception exception = { };
2865 	int err;
2866 	do {
2867 		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
2868 				_nfs4_proc_readdir(dentry, cred, cookie,
2869 					pages, count, plus),
2870 				&exception);
2871 	} while (exception.retry);
2872 	return err;
2873 }
2874 
2875 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
2876 		struct iattr *sattr, dev_t rdev)
2877 {
2878 	struct nfs4_createdata *data;
2879 	int mode = sattr->ia_mode;
2880 	int status = -ENOMEM;
2881 
2882 	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
2883 	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
2884 
2885 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
2886 	if (data == NULL)
2887 		goto out;
2888 
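	/* nfs4_alloc_createdata() defaulted the type to NF4SOCK; fix up the
	 * file type and device numbers for fifos and device nodes. */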
2889 	if (S_ISFIFO(mode))
2890 		data->arg.ftype = NF4FIFO;
2891 	else if (S_ISBLK(mode)) {
2892 		data->arg.ftype = NF4BLK;
2893 		data->arg.u.device.specdata1 = MAJOR(rdev);
2894 		data->arg.u.device.specdata2 = MINOR(rdev);
2895 	}
2896 	else if (S_ISCHR(mode)) {
2897 		data->arg.ftype = NF4CHR;
2898 		data->arg.u.device.specdata1 = MAJOR(rdev);
2899 		data->arg.u.device.specdata2 = MINOR(rdev);
2900 	}
2901 
2902 	status = nfs4_do_create(dir, dentry, data);
2903 
2904 	nfs4_free_createdata(data);
2905 out:
2906 	return status;
2907 }
2908 
2909 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
2910 		struct iattr *sattr, dev_t rdev)
2911 {
2912 	struct nfs4_exception exception = { };
2913 	int err;
2914 	do {
2915 		err = nfs4_handle_exception(NFS_SERVER(dir),
2916 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
2917 				&exception);
2918 	} while (exception.retry);
2919 	return err;
2920 }
2921 
2922 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
2923 		 struct nfs_fsstat *fsstat)
2924 {
2925 	struct nfs4_statfs_arg args = {
2926 		.fh = fhandle,
2927 		.bitmask = server->attr_bitmask,
2928 	};
2929 	struct nfs4_statfs_res res = {
2930 		.fsstat = fsstat,
2931 	};
2932 	struct rpc_message msg = {
2933 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
2934 		.rpc_argp = &args,
2935 		.rpc_resp = &res,
2936 	};
2937 
2938 	nfs_fattr_init(fsstat->fattr);
2939 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2940 }
2941 
2942 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
2943 {
2944 	struct nfs4_exception exception = { };
2945 	int err;
2946 	do {
2947 		err = nfs4_handle_exception(server,
2948 				_nfs4_proc_statfs(server, fhandle, fsstat),
2949 				&exception);
2950 	} while (exception.retry);
2951 	return err;
2952 }
2953 
2954 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
2955 		struct nfs_fsinfo *fsinfo)
2956 {
2957 	struct nfs4_fsinfo_arg args = {
2958 		.fh = fhandle,
2959 		.bitmask = server->attr_bitmask,
2960 	};
2961 	struct nfs4_fsinfo_res res = {
2962 		.fsinfo = fsinfo,
2963 	};
2964 	struct rpc_message msg = {
2965 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
2966 		.rpc_argp = &args,
2967 		.rpc_resp = &res,
2968 	};
2969 
2970 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2971 }
2972 
2973 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
2974 {
2975 	struct nfs4_exception exception = { };
2976 	int err;
2977 
2978 	do {
2979 		err = nfs4_handle_exception(server,
2980 				_nfs4_do_fsinfo(server, fhandle, fsinfo),
2981 				&exception);
2982 	} while (exception.retry);
2983 	return err;
2984 }
2985 
2986 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
2987 {
2988 	nfs_fattr_init(fsinfo->fattr);
2989 	return nfs4_do_fsinfo(server, fhandle, fsinfo);
2990 }
2991 
2992 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
2993 		struct nfs_pathconf *pathconf)
2994 {
2995 	struct nfs4_pathconf_arg args = {
2996 		.fh = fhandle,
2997 		.bitmask = server->attr_bitmask,
2998 	};
2999 	struct nfs4_pathconf_res res = {
3000 		.pathconf = pathconf,
3001 	};
3002 	struct rpc_message msg = {
3003 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3004 		.rpc_argp = &args,
3005 		.rpc_resp = &res,
3006 	};
3007 
3008 	/* None of the pathconf attributes are mandatory to implement */
3009 	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3010 		memset(pathconf, 0, sizeof(*pathconf));
3011 		return 0;
3012 	}
3013 
3014 	nfs_fattr_init(pathconf->fattr);
3015 	return nfs4_call_sync(server, &msg, &args, &res, 0);
3016 }
3017 
3018 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3019 		struct nfs_pathconf *pathconf)
3020 {
3021 	struct nfs4_exception exception = { };
3022 	int err;
3023 
3024 	do {
3025 		err = nfs4_handle_exception(server,
3026 				_nfs4_proc_pathconf(server, fhandle, pathconf),
3027 				&exception);
3028 	} while (exception.retry);
3029 	return err;
3030 }
3031 
3032 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3033 {
3034 	struct nfs_server *server = NFS_SERVER(data->inode);
3035 
3036 	dprintk("--> %s\n", __func__);
3037 
3038 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3039 		return -EAGAIN;
3040 
3041 	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3042 		nfs_restart_rpc(task, server->nfs_client);
3043 		return -EAGAIN;
3044 	}
3045 
3046 	nfs_invalidate_atime(data->inode);
3047 	if (task->tk_status > 0)
3048 		renew_lease(server, data->timestamp);
3049 	return 0;
3050 }
3051 
3052 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3053 {
3054 	data->timestamp   = jiffies;
3055 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3056 }
3057 
3058 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3059 {
3060 	struct inode *inode = data->inode;
3061 
3062 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3063 		return -EAGAIN;
3064 
3065 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3066 		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3067 		return -EAGAIN;
3068 	}
3069 	if (task->tk_status >= 0) {
3070 		renew_lease(NFS_SERVER(inode), data->timestamp);
3071 		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
3072 	}
3073 	return 0;
3074 }
3075 
3076 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3077 {
3078 	struct nfs_server *server = NFS_SERVER(data->inode);
3079 
3080 	data->args.bitmask = server->cache_consistency_bitmask;
3081 	data->res.server = server;
3082 	data->timestamp   = jiffies;
3083 
3084 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3085 }
3086 
3087 static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3088 {
3089 	struct inode *inode = data->inode;
3090 
3091 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3092 		return -EAGAIN;
3093 
3094 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3095 		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3096 		return -EAGAIN;
3097 	}
3098 	nfs_refresh_inode(inode, data->res.fattr);
3099 	return 0;
3100 }
3101 
3102 static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
3103 {
3104 	struct nfs_server *server = NFS_SERVER(data->inode);
3105 
3106 	data->args.bitmask = server->cache_consistency_bitmask;
3107 	data->res.server = server;
3108 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3109 }
3110 
3111 struct nfs4_renewdata {
3112 	struct nfs_client	*client;
3113 	unsigned long		timestamp;
3114 };
3115 
3116 /*
3117  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3118  * standalone procedure for queueing an asynchronous RENEW.
3119  */
3120 static void nfs4_renew_release(void *calldata)
3121 {
3122 	struct nfs4_renewdata *data = calldata;
3123 	struct nfs_client *clp = data->client;
3124 
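	/* Reschedule the renewd timer only if someone other than us still
	 * holds a reference to the client. */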
3125 	if (atomic_read(&clp->cl_count) > 1)
3126 		nfs4_schedule_state_renewal(clp);
3127 	nfs_put_client(clp);
3128 	kfree(data);
3129 }
3130 
3131 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3132 {
3133 	struct nfs4_renewdata *data = calldata;
3134 	struct nfs_client *clp = data->client;
3135 	unsigned long timestamp = data->timestamp;
3136 
3137 	if (task->tk_status < 0) {
3138 		/* Unless we're shutting down, schedule state recovery! */
3139 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
3140 			nfs4_schedule_state_recovery(clp);
3141 		return;
3142 	}
3143 	do_renew_lease(clp, timestamp);
3144 }
3145 
3146 static const struct rpc_call_ops nfs4_renew_ops = {
3147 	.rpc_call_done = nfs4_renew_done,
3148 	.rpc_release = nfs4_renew_release,
3149 };
3150 
3151 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
3152 {
3153 	struct rpc_message msg = {
3154 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3155 		.rpc_argp	= clp,
3156 		.rpc_cred	= cred,
3157 	};
3158 	struct nfs4_renewdata *data;
3159 
3160 	if (!atomic_inc_not_zero(&clp->cl_count))
3161 		return -EIO;
3162 	data = kmalloc(sizeof(*data), GFP_KERNEL);
3163 	if (data == NULL)
3164 		return -ENOMEM;
3165 	data->client = clp;
3166 	data->timestamp = jiffies;
3167 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3168 			&nfs4_renew_ops, data);
3169 }
3170 
3171 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3172 {
3173 	struct rpc_message msg = {
3174 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3175 		.rpc_argp	= clp,
3176 		.rpc_cred	= cred,
3177 	};
3178 	unsigned long now = jiffies;
3179 	int status;
3180 
3181 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3182 	if (status < 0)
3183 		return status;
3184 	do_renew_lease(clp, now);
3185 	return 0;
3186 }
3187 
3188 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3189 {
3190 	return (server->caps & NFS_CAP_ACLS)
3191 		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3192 		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3193 }
3194 
3195 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3196  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3197  * the stack.
3198  */
3199 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3200 
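/* Map a linear kernel buffer onto its backing pages, returning the
 * offset of the buffer within the first page via *pgbase. */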
3201 static void buf_to_pages(const void *buf, size_t buflen,
3202 		struct page **pages, unsigned int *pgbase)
3203 {
3204 	const void *p = buf;
3205 
3206 	*pgbase = offset_in_page(buf);
3207 	p -= *pgbase;
3208 	while (p < buf + buflen) {
3209 		*(pages++) = virt_to_page(p);
3210 		p += PAGE_CACHE_SIZE;
3211 	}
3212 }
3213 
3214 struct nfs4_cached_acl {
3215 	int cached;
3216 	size_t len;
3217 	char data[0];
3218 };
3219 
3220 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3221 {
3222 	struct nfs_inode *nfsi = NFS_I(inode);
3223 
3224 	spin_lock(&inode->i_lock);
3225 	kfree(nfsi->nfs4_acl);
3226 	nfsi->nfs4_acl = acl;
3227 	spin_unlock(&inode->i_lock);
3228 }
3229 
3230 static void nfs4_zap_acl_attr(struct inode *inode)
3231 {
3232 	nfs4_set_cached_acl(inode, NULL);
3233 }
3234 
3235 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3236 {
3237 	struct nfs_inode *nfsi = NFS_I(inode);
3238 	struct nfs4_cached_acl *acl;
3239 	int ret = -ENOENT;
3240 
3241 	spin_lock(&inode->i_lock);
3242 	acl = nfsi->nfs4_acl;
3243 	if (acl == NULL)
3244 		goto out;
3245 	if (buf == NULL) /* user is just asking for length */
3246 		goto out_len;
3247 	if (acl->cached == 0)
3248 		goto out;
3249 	ret = -ERANGE; /* see getxattr(2) man page */
3250 	if (acl->len > buflen)
3251 		goto out;
3252 	memcpy(buf, acl->data, acl->len);
3253 out_len:
3254 	ret = acl->len;
3255 out:
3256 	spin_unlock(&inode->i_lock);
3257 	return ret;
3258 }
3259 
3260 static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
3261 {
3262 	struct nfs4_cached_acl *acl;
3263 
3264 	if (buf && acl_len <= PAGE_SIZE) {
3265 		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3266 		if (acl == NULL)
3267 			goto out;
3268 		acl->cached = 1;
3269 		memcpy(acl->data, buf, acl_len);
3270 	} else {
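		/* Either we have no data (buf == NULL) or the ACL is too big
		 * to copy: cache only the length so that later size queries
		 * can still be answered. */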
3271 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3272 		if (acl == NULL)
3273 			goto out;
3274 		acl->cached = 0;
3275 	}
3276 	acl->len = acl_len;
3277 out:
3278 	nfs4_set_cached_acl(inode, acl);
3279 }
3280 
3281 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3282 {
3283 	struct page *pages[NFS4ACL_MAXPAGES];
3284 	struct nfs_getaclargs args = {
3285 		.fh = NFS_FH(inode),
3286 		.acl_pages = pages,
3287 		.acl_len = buflen,
3288 	};
3289 	struct nfs_getaclres res = {
3290 		.acl_len = buflen,
3291 	};
3292 	void *resp_buf;
3293 	struct rpc_message msg = {
3294 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3295 		.rpc_argp = &args,
3296 		.rpc_resp = &res,
3297 	};
3298 	struct page *localpage = NULL;
3299 	int ret;
3300 
3301 	if (buflen < PAGE_SIZE) {
3302 		/* As long as we're doing a round trip to the server anyway,
3303 		 * let's be prepared for a page of acl data. */
3304 		localpage = alloc_page(GFP_KERNEL);
3305 		if (localpage == NULL)
3306 			return -ENOMEM;
3307 		resp_buf = page_address(localpage);
3308 		args.acl_pages[0] = localpage;
3309 		args.acl_pgbase = 0;
3310 		args.acl_len = PAGE_SIZE;
3311 	} else {
3312 		resp_buf = buf;
3313 		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
3314 	}
3315 	ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
3316 	if (ret)
3317 		goto out_free;
3318 	if (res.acl_len > args.acl_len)
3319 		nfs4_write_cached_acl(inode, NULL, res.acl_len);
3320 	else
3321 		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
3322 	if (buf) {
3323 		ret = -ERANGE;
3324 		if (res.acl_len > buflen)
3325 			goto out_free;
3326 		if (localpage)
3327 			memcpy(buf, resp_buf, res.acl_len);
3328 	}
3329 	ret = res.acl_len;
3330 out_free:
3331 	if (localpage)
3332 		__free_page(localpage);
3333 	return ret;
3334 }
3335 
3336 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3337 {
3338 	struct nfs4_exception exception = { };
3339 	ssize_t ret;
3340 	do {
3341 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3342 		if (ret >= 0)
3343 			break;
3344 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3345 	} while (exception.retry);
3346 	return ret;
3347 }
3348 
3349 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3350 {
3351 	struct nfs_server *server = NFS_SERVER(inode);
3352 	int ret;
3353 
3354 	if (!nfs4_server_supports_acls(server))
3355 		return -EOPNOTSUPP;
3356 	ret = nfs_revalidate_inode(server, inode);
3357 	if (ret < 0)
3358 		return ret;
3359 	ret = nfs4_read_cached_acl(inode, buf, buflen);
3360 	if (ret != -ENOENT)
3361 		return ret;
3362 	return nfs4_get_acl_uncached(inode, buf, buflen);
3363 }
3364 
3365 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3366 {
3367 	struct nfs_server *server = NFS_SERVER(inode);
3368 	struct page *pages[NFS4ACL_MAXPAGES];
3369 	struct nfs_setaclargs arg = {
3370 		.fh		= NFS_FH(inode),
3371 		.acl_pages	= pages,
3372 		.acl_len	= buflen,
3373 	};
3374 	struct nfs_setaclres res;
3375 	struct rpc_message msg = {
3376 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3377 		.rpc_argp	= &arg,
3378 		.rpc_resp	= &res,
3379 	};
3380 	int ret;
3381 
3382 	if (!nfs4_server_supports_acls(server))
3383 		return -EOPNOTSUPP;
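	/* Give back any delegation first: changing the ACL invalidates
	 * cached access and attribute state, which we then revalidate from
	 * the server below. */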
3384 	nfs_inode_return_delegation(inode);
3385 	buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3386 	ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
3387 	nfs_access_zap_cache(inode);
3388 	nfs_zap_acl_cache(inode);
3389 	return ret;
3390 }
3391 
3392 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3393 {
3394 	struct nfs4_exception exception = { };
3395 	int err;
3396 	do {
3397 		err = nfs4_handle_exception(NFS_SERVER(inode),
3398 				__nfs4_proc_set_acl(inode, buf, buflen),
3399 				&exception);
3400 	} while (exception.retry);
3401 	return err;
3402 }
3403 
3404 static int
3405 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3406 {
3407 	struct nfs_client *clp = server->nfs_client;
3408 
3409 	if (task->tk_status >= 0)
3410 		return 0;
3411 	switch(task->tk_status) {
3412 		case -NFS4ERR_ADMIN_REVOKED:
3413 		case -NFS4ERR_BAD_STATEID:
3414 		case -NFS4ERR_OPENMODE:
3415 			if (state == NULL)
3416 				break;
3417 			nfs4_state_mark_reclaim_nograce(clp, state);
3418 			goto do_state_recovery;
3419 		case -NFS4ERR_STALE_STATEID:
3420 		case -NFS4ERR_STALE_CLIENTID:
3421 		case -NFS4ERR_EXPIRED:
3422 			goto do_state_recovery;
3423 #if defined(CONFIG_NFS_V4_1)
3424 		case -NFS4ERR_BADSESSION:
3425 		case -NFS4ERR_BADSLOT:
3426 		case -NFS4ERR_BAD_HIGH_SLOT:
3427 		case -NFS4ERR_DEADSESSION:
3428 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3429 		case -NFS4ERR_SEQ_FALSE_RETRY:
3430 		case -NFS4ERR_SEQ_MISORDERED:
3431 			dprintk("%s ERROR %d, Reset session\n", __func__,
3432 				task->tk_status);
3433 			nfs4_schedule_state_recovery(clp);
3434 			task->tk_status = 0;
3435 			return -EAGAIN;
3436 #endif /* CONFIG_NFS_V4_1 */
3437 		case -NFS4ERR_DELAY:
3438 			nfs_inc_server_stats(server, NFSIOS_DELAY);
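			/* fall through to the delayed-retry handling below */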
3439 		case -NFS4ERR_GRACE:
3440 		case -EKEYEXPIRED:
3441 			rpc_delay(task, NFS4_POLL_RETRY_MAX);
3442 			task->tk_status = 0;
3443 			return -EAGAIN;
3444 		case -NFS4ERR_OLD_STATEID:
3445 			task->tk_status = 0;
3446 			return -EAGAIN;
3447 	}
3448 	task->tk_status = nfs4_map_errors(task->tk_status);
3449 	return 0;
3450 do_state_recovery:
3451 	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3452 	nfs4_schedule_state_recovery(clp);
3453 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3454 		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3455 	task->tk_status = 0;
3456 	return -EAGAIN;
3457 }
3458 
3459 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3460 		unsigned short port, struct rpc_cred *cred,
3461 		struct nfs4_setclientid_res *res)
3462 {
3463 	nfs4_verifier sc_verifier;
3464 	struct nfs4_setclientid setclientid = {
3465 		.sc_verifier = &sc_verifier,
3466 		.sc_prog = program,
3467 	};
3468 	struct rpc_message msg = {
3469 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3470 		.rpc_argp = &setclientid,
3471 		.rpc_resp = res,
3472 		.rpc_cred = cred,
3473 	};
3474 	__be32 *p;
3475 	int loop = 0;
3476 	int status;
3477 
3478 	p = (__be32*)sc_verifier.data;
3479 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
3480 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
3481 
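	/* NFS4ERR_CLID_INUSE means the server already has this client name
	 * registered under different credentials: alternately bump the
	 * uniquifier and wait out a lease period, then try again. */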
3482 	for(;;) {
3483 		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3484 				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3485 				clp->cl_ipaddr,
3486 				rpc_peeraddr2str(clp->cl_rpcclient,
3487 							RPC_DISPLAY_ADDR),
3488 				rpc_peeraddr2str(clp->cl_rpcclient,
3489 							RPC_DISPLAY_PROTO),
3490 				clp->cl_rpcclient->cl_auth->au_ops->au_name,
3491 				clp->cl_id_uniquifier);
3492 		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3493 				sizeof(setclientid.sc_netid),
3494 				rpc_peeraddr2str(clp->cl_rpcclient,
3495 							RPC_DISPLAY_NETID));
3496 		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3497 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3498 				clp->cl_ipaddr, port >> 8, port & 255);
3499 
3500 		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3501 		if (status != -NFS4ERR_CLID_INUSE)
3502 			break;
3503 		if (signalled())
3504 			break;
3505 		if (loop++ & 1)
3506 			ssleep(clp->cl_lease_time + 1);
3507 		else
3508 			if (++clp->cl_id_uniquifier == 0)
3509 				break;
3510 	}
3511 	return status;
3512 }
3513 
3514 static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp,
3515 		struct nfs4_setclientid_res *arg,
3516 		struct rpc_cred *cred)
3517 {
3518 	struct nfs_fsinfo fsinfo;
3519 	struct rpc_message msg = {
3520 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
3521 		.rpc_argp = arg,
3522 		.rpc_resp = &fsinfo,
3523 		.rpc_cred = cred,
3524 	};
3525 	unsigned long now;
3526 	int status;
3527 
3528 	now = jiffies;
3529 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3530 	if (status == 0) {
3531 		spin_lock(&clp->cl_lock);
3532 		clp->cl_lease_time = fsinfo.lease_time * HZ;
3533 		clp->cl_last_renewal = now;
3534 		spin_unlock(&clp->cl_lock);
3535 	}
3536 	return status;
3537 }
3538 
3539 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
3540 		struct nfs4_setclientid_res *arg,
3541 		struct rpc_cred *cred)
3542 {
3543 	long timeout = 0;
3544 	int err;
3545 	do {
3546 		err = _nfs4_proc_setclientid_confirm(clp, arg, cred);
3547 		switch (err) {
3548 			case 0:
3549 				return err;
3550 			case -NFS4ERR_RESOURCE:
3551 				/* The IBM lawyers misread another document! */
3552 			case -NFS4ERR_DELAY:
3553 				err = nfs4_delay(clp->cl_rpcclient, &timeout);
3554 		}
3555 	} while (err == 0);
3556 	return err;
3557 }
3558 
3559 struct nfs4_delegreturndata {
3560 	struct nfs4_delegreturnargs args;
3561 	struct nfs4_delegreturnres res;
3562 	struct nfs_fh fh;
3563 	nfs4_stateid stateid;
3564 	unsigned long timestamp;
3565 	struct nfs_fattr fattr;
3566 	int rpc_status;
3567 };
3568 
3569 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
3570 {
3571 	struct nfs4_delegreturndata *data = calldata;
3572 
3573 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3574 		return;
3575 
3576 	switch (task->tk_status) {
3577 	case -NFS4ERR_STALE_STATEID:
3578 	case -NFS4ERR_EXPIRED:
3579 	case 0:
3580 		renew_lease(data->res.server, data->timestamp);
3581 		break;
3582 	default:
3583 		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
3584 				-EAGAIN) {
3585 			nfs_restart_rpc(task, data->res.server->nfs_client);
3586 			return;
3587 		}
3588 	}
3589 	data->rpc_status = task->tk_status;
3590 }
3591 
3592 static void nfs4_delegreturn_release(void *calldata)
3593 {
3594 	kfree(calldata);
3595 }
3596 
3597 #if defined(CONFIG_NFS_V4_1)
3598 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
3599 {
3600 	struct nfs4_delegreturndata *d_data;
3601 
3602 	d_data = (struct nfs4_delegreturndata *)data;
3603 
3604 	if (nfs4_setup_sequence(d_data->res.server,
3605 				&d_data->args.seq_args,
3606 				&d_data->res.seq_res, 1, task))
3607 		return;
3608 	rpc_call_start(task);
3609 }
3610 #endif /* CONFIG_NFS_V4_1 */
3611 
3612 static const struct rpc_call_ops nfs4_delegreturn_ops = {
3613 #if defined(CONFIG_NFS_V4_1)
3614 	.rpc_call_prepare = nfs4_delegreturn_prepare,
3615 #endif /* CONFIG_NFS_V4_1 */
3616 	.rpc_call_done = nfs4_delegreturn_done,
3617 	.rpc_release = nfs4_delegreturn_release,
3618 };
3619 
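/*
 * Send DELEGRETURN as an asynchronous RPC.  If @issync is set, wait
 * for the call to complete and refresh the inode with the post-op
 * attributes returned by the server.
 */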
3620 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3621 {
3622 	struct nfs4_delegreturndata *data;
3623 	struct nfs_server *server = NFS_SERVER(inode);
3624 	struct rpc_task *task;
3625 	struct rpc_message msg = {
3626 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
3627 		.rpc_cred = cred,
3628 	};
3629 	struct rpc_task_setup task_setup_data = {
3630 		.rpc_client = server->client,
3631 		.rpc_message = &msg,
3632 		.callback_ops = &nfs4_delegreturn_ops,
3633 		.flags = RPC_TASK_ASYNC,
3634 	};
3635 	int status = 0;
3636 
3637 	data = kzalloc(sizeof(*data), GFP_NOFS);
3638 	if (data == NULL)
3639 		return -ENOMEM;
3640 	data->args.fhandle = &data->fh;
3641 	data->args.stateid = &data->stateid;
3642 	data->args.bitmask = server->attr_bitmask;
3643 	nfs_copy_fh(&data->fh, NFS_FH(inode));
3644 	memcpy(&data->stateid, stateid, sizeof(data->stateid));
3645 	data->res.fattr = &data->fattr;
3646 	data->res.server = server;
3647 	nfs_fattr_init(data->res.fattr);
3648 	data->timestamp = jiffies;
3649 	data->rpc_status = 0;
3650 
3651 	task_setup_data.callback_data = data;
3652 	msg.rpc_argp = &data->args;
3653 	msg.rpc_resp = &data->res;
3654 	task = rpc_run_task(&task_setup_data);
3655 	if (IS_ERR(task))
3656 		return PTR_ERR(task);
3657 	if (!issync)
3658 		goto out;
3659 	status = nfs4_wait_for_completion_rpc_task(task);
3660 	if (status != 0)
3661 		goto out;
3662 	status = data->rpc_status;
3663 	if (status != 0)
3664 		goto out;
3665 	nfs_refresh_inode(inode, &data->fattr);
3666 out:
3667 	rpc_put_task(task);
3668 	return status;
3669 }
3670 
3671 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3672 {
3673 	struct nfs_server *server = NFS_SERVER(inode);
3674 	struct nfs4_exception exception = { };
3675 	int err;
3676 	do {
3677 		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
3678 		switch (err) {
3679 			case -NFS4ERR_STALE_STATEID:
3680 			case -NFS4ERR_EXPIRED:
3681 			case 0:
3682 				return 0;
3683 		}
3684 		err = nfs4_handle_exception(server, err, &exception);
3685 	} while (exception.retry);
3686 	return err;
3687 }
3688 
3689 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
3690 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
3691 
3692 /*
3693  * Sleep for the given timeout, then return the (exponentially backed
3693  * off, capped) timeout to use for the next LOCK retry.
3694  */
3695 static unsigned long
3696 nfs4_set_lock_task_retry(unsigned long timeout)
3697 {
3698 	schedule_timeout_killable(timeout);
3699 	timeout <<= 1;
3700 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
3701 		return NFS4_LOCK_MAXTIMEOUT;
3702 	return timeout;
3703 }
3704 
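/*
 * Test for a conflicting lock with LOCKT.  A successful reply means
 * there is no conflict, so F_UNLCK is reported back to the caller; on
 * NFS4ERR_DENIED the decoded reply fills in @request (res.denied
 * points at it) with the description of the conflicting lock.
 */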
3705 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3706 {
3707 	struct inode *inode = state->inode;
3708 	struct nfs_server *server = NFS_SERVER(inode);
3709 	struct nfs_client *clp = server->nfs_client;
3710 	struct nfs_lockt_args arg = {
3711 		.fh = NFS_FH(inode),
3712 		.fl = request,
3713 	};
3714 	struct nfs_lockt_res res = {
3715 		.denied = request,
3716 	};
3717 	struct rpc_message msg = {
3718 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
3719 		.rpc_argp       = &arg,
3720 		.rpc_resp       = &res,
3721 		.rpc_cred	= state->owner->so_cred,
3722 	};
3723 	struct nfs4_lock_state *lsp;
3724 	int status;
3725 
3726 	arg.lock_owner.clientid = clp->cl_clientid;
3727 	status = nfs4_set_lock_state(state, request);
3728 	if (status != 0)
3729 		goto out;
3730 	lsp = request->fl_u.nfs4_fl.owner;
3731 	arg.lock_owner.id = lsp->ls_id.id;
3732 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
3733 	switch (status) {
3734 		case 0:
3735 			request->fl_type = F_UNLCK;
3736 			break;
3737 		case -NFS4ERR_DENIED:
3738 			status = 0;
3739 	}
3740 	request->fl_ops->fl_release_private(request);
3741 out:
3742 	return status;
3743 }
3744 
3745 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3746 {
3747 	struct nfs4_exception exception = { };
3748 	int err;
3749 
3750 	do {
3751 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
3752 				_nfs4_proc_getlk(state, cmd, request),
3753 				&exception);
3754 	} while (exception.retry);
3755 	return err;
3756 }
3757 
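/*
 * Apply a lock request locally through the VFS, dispatching on
 * whether it is a POSIX (fcntl) or BSD (flock) style lock.
 */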
3758 static int do_vfs_lock(struct file *file, struct file_lock *fl)
3759 {
3760 	int res = 0;
3761 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
3762 		case FL_POSIX:
3763 			res = posix_lock_file_wait(file, fl);
3764 			break;
3765 		case FL_FLOCK:
3766 			res = flock_lock_file_wait(file, fl);
3767 			break;
3768 		default:
3769 			BUG();
3770 	}
3771 	return res;
3772 }
3773 
3774 struct nfs4_unlockdata {
3775 	struct nfs_locku_args arg;
3776 	struct nfs_locku_res res;
3777 	struct nfs4_lock_state *lsp;
3778 	struct nfs_open_context *ctx;
3779 	struct file_lock fl;
3780 	const struct nfs_server *server;
3781 	unsigned long timestamp;
3782 };
3783 
3784 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
3785 		struct nfs_open_context *ctx,
3786 		struct nfs4_lock_state *lsp,
3787 		struct nfs_seqid *seqid)
3788 {
3789 	struct nfs4_unlockdata *p;
3790 	struct inode *inode = lsp->ls_state->inode;
3791 
3792 	p = kzalloc(sizeof(*p), GFP_NOFS);
3793 	if (p == NULL)
3794 		return NULL;
3795 	p->arg.fh = NFS_FH(inode);
3796 	p->arg.fl = &p->fl;
3797 	p->arg.seqid = seqid;
3798 	p->res.seqid = seqid;
3799 	p->arg.stateid = &lsp->ls_stateid;
3800 	p->lsp = lsp;
3801 	atomic_inc(&lsp->ls_count);
3802 	/* Ensure we don't close file until we're done freeing locks! */
3803 	p->ctx = get_nfs_open_context(ctx);
3804 	memcpy(&p->fl, fl, sizeof(p->fl));
3805 	p->server = NFS_SERVER(inode);
3806 	return p;
3807 }
3808 
3809 static void nfs4_locku_release_calldata(void *data)
3810 {
3811 	struct nfs4_unlockdata *calldata = data;
3812 	nfs_free_seqid(calldata->arg.seqid);
3813 	nfs4_put_lock_state(calldata->lsp);
3814 	put_nfs_open_context(calldata->ctx);
3815 	kfree(calldata);
3816 }
3817 
3818 static void nfs4_locku_done(struct rpc_task *task, void *data)
3819 {
3820 	struct nfs4_unlockdata *calldata = data;
3821 
3822 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3823 		return;
3824 	switch (task->tk_status) {
3825 		case 0:
3826 			memcpy(calldata->lsp->ls_stateid.data,
3827 					calldata->res.stateid.data,
3828 					sizeof(calldata->lsp->ls_stateid.data));
3829 			renew_lease(calldata->server, calldata->timestamp);
3830 			break;
3831 		case -NFS4ERR_BAD_STATEID:
3832 		case -NFS4ERR_OLD_STATEID:
3833 		case -NFS4ERR_STALE_STATEID:
3834 		case -NFS4ERR_EXPIRED:
3835 			break;
3836 		default:
3837 			if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
3838 				nfs_restart_rpc(task,
3839 						 calldata->server->nfs_client);
3840 	}
3841 }
3842 
3843 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
3844 {
3845 	struct nfs4_unlockdata *calldata = data;
3846 
3847 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3848 		return;
3849 	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
3850 		/* Note: exit _without_ running nfs4_locku_done */
3851 		task->tk_action = NULL;
3852 		return;
3853 	}
3854 	calldata->timestamp = jiffies;
3855 	if (nfs4_setup_sequence(calldata->server,
3856 				&calldata->arg.seq_args,
3857 				&calldata->res.seq_res, 1, task))
3858 		return;
3859 	rpc_call_start(task);
3860 }
3861 
3862 static const struct rpc_call_ops nfs4_locku_ops = {
3863 	.rpc_call_prepare = nfs4_locku_prepare,
3864 	.rpc_call_done = nfs4_locku_done,
3865 	.rpc_release = nfs4_locku_release_calldata,
3866 };
3867 
3868 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
3869 		struct nfs_open_context *ctx,
3870 		struct nfs4_lock_state *lsp,
3871 		struct nfs_seqid *seqid)
3872 {
3873 	struct nfs4_unlockdata *data;
3874 	struct rpc_message msg = {
3875 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
3876 		.rpc_cred = ctx->cred,
3877 	};
3878 	struct rpc_task_setup task_setup_data = {
3879 		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
3880 		.rpc_message = &msg,
3881 		.callback_ops = &nfs4_locku_ops,
3882 		.workqueue = nfsiod_workqueue,
3883 		.flags = RPC_TASK_ASYNC,
3884 	};
3885 
3886 	/* Ensure this is an unlock - when canceling a lock, the
3887 	 * canceled lock is passed in, and it won't be an unlock.
3888 	 */
3889 	fl->fl_type = F_UNLCK;
3890 
3891 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
3892 	if (data == NULL) {
3893 		nfs_free_seqid(seqid);
3894 		return ERR_PTR(-ENOMEM);
3895 	}
3896 
3897 	msg.rpc_argp = &data->arg;
3898 	msg.rpc_resp = &data->res;
3899 	task_setup_data.callback_data = data;
3900 	return rpc_run_task(&task_setup_data);
3901 }
3902 
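/*
 * Release a lock.  The lock is dropped locally first (FL_EXISTS makes
 * the VFS report -ENOENT if no matching lock was held, in which case
 * no RPC is needed); unless the open is delegated, a LOCKU is then
 * sent to the server and waited on.
 */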
3903 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
3904 {
3905 	struct nfs_inode *nfsi = NFS_I(state->inode);
3906 	struct nfs_seqid *seqid;
3907 	struct nfs4_lock_state *lsp;
3908 	struct rpc_task *task;
3909 	int status = 0;
3910 	unsigned char fl_flags = request->fl_flags;
3911 
3912 	status = nfs4_set_lock_state(state, request);
3913 	/* Unlock _before_ we do the RPC call */
3914 	request->fl_flags |= FL_EXISTS;
3915 	down_read(&nfsi->rwsem);
3916 	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
3917 		up_read(&nfsi->rwsem);
3918 		goto out;
3919 	}
3920 	up_read(&nfsi->rwsem);
3921 	if (status != 0)
3922 		goto out;
3923 	/* Is this a delegated lock? */
3924 	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3925 		goto out;
3926 	lsp = request->fl_u.nfs4_fl.owner;
3927 	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
3928 	status = -ENOMEM;
3929 	if (seqid == NULL)
3930 		goto out;
3931 	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
3932 	status = PTR_ERR(task);
3933 	if (IS_ERR(task))
3934 		goto out;
3935 	status = nfs4_wait_for_completion_rpc_task(task);
3936 	rpc_put_task(task);
3937 out:
3938 	request->fl_flags = fl_flags;
3939 	return status;
3940 }
3941 
3942 struct nfs4_lockdata {
3943 	struct nfs_lock_args arg;
3944 	struct nfs_lock_res res;
3945 	struct nfs4_lock_state *lsp;
3946 	struct nfs_open_context *ctx;
3947 	struct file_lock fl;
3948 	unsigned long timestamp;
3949 	int rpc_status;
3950 	int cancelled;
3951 	struct nfs_server *server;
3952 };
3953 
3954 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
3955 		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
3956 		gfp_t gfp_mask)
3957 {
3958 	struct nfs4_lockdata *p;
3959 	struct inode *inode = lsp->ls_state->inode;
3960 	struct nfs_server *server = NFS_SERVER(inode);
3961 
3962 	p = kzalloc(sizeof(*p), gfp_mask);
3963 	if (p == NULL)
3964 		return NULL;
3965 
3966 	p->arg.fh = NFS_FH(inode);
3967 	p->arg.fl = &p->fl;
3968 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
3969 	if (p->arg.open_seqid == NULL)
3970 		goto out_free;
3971 	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
3972 	if (p->arg.lock_seqid == NULL)
3973 		goto out_free_seqid;
3974 	p->arg.lock_stateid = &lsp->ls_stateid;
3975 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
3976 	p->arg.lock_owner.id = lsp->ls_id.id;
3977 	p->res.lock_seqid = p->arg.lock_seqid;
3978 	p->lsp = lsp;
3979 	p->server = server;
3980 	atomic_inc(&lsp->ls_count);
3981 	p->ctx = get_nfs_open_context(ctx);
3982 	memcpy(&p->fl, fl, sizeof(p->fl));
3983 	return p;
3984 out_free_seqid:
3985 	nfs_free_seqid(p->arg.open_seqid);
3986 out_free:
3987 	kfree(p);
3988 	return NULL;
3989 }
3990 
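/*
 * Prepare a LOCK call.  If this lock owner has not yet been confirmed
 * by the server, the request becomes an open_to_lock_owner and must
 * also wait on (and carry) the open owner's seqid.
 */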
3991 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
3992 {
3993 	struct nfs4_lockdata *data = calldata;
3994 	struct nfs4_state *state = data->lsp->ls_state;
3995 
3996 	dprintk("%s: begin!\n", __func__);
3997 	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
3998 		return;
3999 	/* Do we need to do an open_to_lock_owner? */
4000 	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4001 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4002 			return;
4003 		data->arg.open_stateid = &state->stateid;
4004 		data->arg.new_lock_owner = 1;
4005 		data->res.open_seqid = data->arg.open_seqid;
4006 	} else
4007 		data->arg.new_lock_owner = 0;
4008 	data->timestamp = jiffies;
4009 	if (nfs4_setup_sequence(data->server,
4010 				&data->arg.seq_args,
4011 				&data->res.seq_res, 1, task))
4012 		return;
4013 	rpc_call_start(task);
4014 	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4015 }
4016 
4017 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4018 {
4019 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4020 	nfs4_lock_prepare(task, calldata);
4021 }
4022 
4023 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4024 {
4025 	struct nfs4_lockdata *data = calldata;
4026 
4027 	dprintk("%s: begin!\n", __func__);
4028 
4029 	if (!nfs4_sequence_done(task, &data->res.seq_res))
4030 		return;
4031 
4032 	data->rpc_status = task->tk_status;
4033 	if (data->arg.new_lock_owner != 0) {
4034 		if (data->rpc_status == 0)
4035 			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4036 		else
4037 			goto out;
4038 	}
4039 	if (data->rpc_status == 0) {
4040 		memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
4041 					sizeof(data->lsp->ls_stateid.data));
4042 		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4043 		renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
4044 	}
4045 out:
4046 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4047 }
4048 
4049 static void nfs4_lock_release(void *calldata)
4050 {
4051 	struct nfs4_lockdata *data = calldata;
4052 
4053 	dprintk("%s: begin!\n", __func__);
4054 	nfs_free_seqid(data->arg.open_seqid);
4055 	if (data->cancelled != 0) {
4056 		struct rpc_task *task;
4057 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4058 				data->arg.lock_seqid);
4059 		if (!IS_ERR(task))
4060 			rpc_put_task(task);
4061 		dprintk("%s: cancelling lock!\n", __func__);
4062 	} else
4063 		nfs_free_seqid(data->arg.lock_seqid);
4064 	nfs4_put_lock_state(data->lsp);
4065 	put_nfs_open_context(data->ctx);
4066 	kfree(data);
4067 	dprintk("%s: done!\n", __func__);
4068 }
4069 
4070 static const struct rpc_call_ops nfs4_lock_ops = {
4071 	.rpc_call_prepare = nfs4_lock_prepare,
4072 	.rpc_call_done = nfs4_lock_done,
4073 	.rpc_release = nfs4_lock_release,
4074 };
4075 
4076 static const struct rpc_call_ops nfs4_recover_lock_ops = {
4077 	.rpc_call_prepare = nfs4_recover_lock_prepare,
4078 	.rpc_call_done = nfs4_lock_done,
4079 	.rpc_release = nfs4_lock_release,
4080 };
4081 
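/*
 * A failed LOCK may leave the stateids in need of recovery: mark the
 * state for nograce recovery on admin-revoked, bad or expired
 * stateids, and for reboot recovery on a stale stateid.
 */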
4082 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4083 {
4084 	struct nfs_client *clp = server->nfs_client;
4085 	struct nfs4_state *state = lsp->ls_state;
4086 
4087 	switch (error) {
4088 	case -NFS4ERR_ADMIN_REVOKED:
4089 	case -NFS4ERR_BAD_STATEID:
4090 	case -NFS4ERR_EXPIRED:
4091 		if (new_lock_owner != 0 ||
4092 		   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4093 			nfs4_state_mark_reclaim_nograce(clp, state);
4094 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4095 		break;
4096 	case -NFS4ERR_STALE_STATEID:
4097 		if (new_lock_owner != 0 ||
4098 		    (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4099 			nfs4_state_mark_reclaim_reboot(clp, state);
4100 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4101 	}
4102 }
4103 
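/*
 * Send a single LOCK request and wait for it to complete.  If the
 * wait is interrupted, the request is marked as cancelled so that the
 * release callback sends a matching LOCKU for any lock the server may
 * have granted.
 */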
4104 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4105 {
4106 	struct nfs4_lockdata *data;
4107 	struct rpc_task *task;
4108 	struct rpc_message msg = {
4109 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4110 		.rpc_cred = state->owner->so_cred,
4111 	};
4112 	struct rpc_task_setup task_setup_data = {
4113 		.rpc_client = NFS_CLIENT(state->inode),
4114 		.rpc_message = &msg,
4115 		.callback_ops = &nfs4_lock_ops,
4116 		.workqueue = nfsiod_workqueue,
4117 		.flags = RPC_TASK_ASYNC,
4118 	};
4119 	int ret;
4120 
4121 	dprintk("%s: begin!\n", __func__);
4122 	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4123 			fl->fl_u.nfs4_fl.owner,
4124 			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4125 	if (data == NULL)
4126 		return -ENOMEM;
4127 	if (IS_SETLKW(cmd))
4128 		data->arg.block = 1;
4129 	if (recovery_type > NFS_LOCK_NEW) {
4130 		if (recovery_type == NFS_LOCK_RECLAIM)
4131 			data->arg.reclaim = NFS_LOCK_RECLAIM;
4132 		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4133 	}
4134 	msg.rpc_argp = &data->arg;
4135 	msg.rpc_resp = &data->res;
4136 	task_setup_data.callback_data = data;
4137 	task = rpc_run_task(&task_setup_data);
4138 	if (IS_ERR(task))
4139 		return PTR_ERR(task);
4140 	ret = nfs4_wait_for_completion_rpc_task(task);
4141 	if (ret == 0) {
4142 		ret = data->rpc_status;
4143 		if (ret)
4144 			nfs4_handle_setlk_error(data->server, data->lsp,
4145 					data->arg.new_lock_owner, ret);
4146 	} else
4147 		data->cancelled = 1;
4148 	rpc_put_task(task);
4149 	dprintk("%s: done, ret = %d!\n", __func__, ret);
4150 	return ret;
4151 }
4152 
4153 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4154 {
4155 	struct nfs_server *server = NFS_SERVER(state->inode);
4156 	struct nfs4_exception exception = { };
4157 	int err;
4158 
4159 	do {
4160 		/* Cache the lock if possible... */
4161 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4162 			return 0;
4163 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4164 		if (err != -NFS4ERR_DELAY)
4165 			break;
4166 		nfs4_handle_exception(server, err, &exception);
4167 	} while (exception.retry);
4168 	return err;
4169 }
4170 
4171 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4172 {
4173 	struct nfs_server *server = NFS_SERVER(state->inode);
4174 	struct nfs4_exception exception = { };
4175 	int err;
4176 
4177 	err = nfs4_set_lock_state(state, request);
4178 	if (err != 0)
4179 		return err;
4180 	do {
4181 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4182 			return 0;
4183 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4184 		switch (err) {
4185 		default:
4186 			goto out;
4187 		case -NFS4ERR_GRACE:
4188 		case -NFS4ERR_DELAY:
4189 			nfs4_handle_exception(server, err, &exception);
4190 			err = 0;
4191 		}
4192 	} while (exception.retry);
4193 out:
4194 	return err;
4195 }
4196 
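/*
 * Set a lock.  While the open is delegated, the lock is only recorded
 * locally; otherwise the LOCK is sent to the server first and the
 * local VFS copy is updated afterwards.  nfsi->rwsem is held to avoid
 * racing with a delegation recall.
 */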
4197 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4198 {
4199 	struct nfs_inode *nfsi = NFS_I(state->inode);
4200 	unsigned char fl_flags = request->fl_flags;
4201 	int status = -ENOLCK;
4202 
4203 	if ((fl_flags & FL_POSIX) &&
4204 			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4205 		goto out;
4206 	/* Is this a delegated open? */
4207 	status = nfs4_set_lock_state(state, request);
4208 	if (status != 0)
4209 		goto out;
4210 	request->fl_flags |= FL_ACCESS;
4211 	status = do_vfs_lock(request->fl_file, request);
4212 	if (status < 0)
4213 		goto out;
4214 	down_read(&nfsi->rwsem);
4215 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4216 		/* Yes: cache locks! */
4217 		/* ...but avoid races with delegation recall... */
4218 		request->fl_flags = fl_flags & ~FL_SLEEP;
4219 		status = do_vfs_lock(request->fl_file, request);
4220 		goto out_unlock;
4221 	}
4222 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4223 	if (status != 0)
4224 		goto out_unlock;
4225 	/* Note: we always want to sleep here! */
4226 	request->fl_flags = fl_flags | FL_SLEEP;
4227 	if (do_vfs_lock(request->fl_file, request) < 0)
4228 		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
4229 out_unlock:
4230 	up_read(&nfsi->rwsem);
4231 out:
4232 	request->fl_flags = fl_flags;
4233 	return status;
4234 }
4235 
4236 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4237 {
4238 	struct nfs4_exception exception = { };
4239 	int err;
4240 
4241 	do {
4242 		err = _nfs4_proc_setlk(state, cmd, request);
4243 		if (err == -NFS4ERR_DENIED)
4244 			err = -EAGAIN;
4245 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
4246 				err, &exception);
4247 	} while (exception.retry);
4248 	return err;
4249 }
4250 
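/*
 * Entry point for the NFSv4 lock() method: dispatch GETLK, UNLCK and
 * SETLK/SETLKW requests, retrying blocking SETLKW requests with
 * exponential backoff while the lock remains denied.
 */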
4251 static int
4252 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4253 {
4254 	struct nfs_open_context *ctx;
4255 	struct nfs4_state *state;
4256 	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4257 	int status;
4258 
4259 	/* verify open state */
4260 	ctx = nfs_file_open_context(filp);
4261 	state = ctx->state;
4262 
4263 	if (request->fl_start < 0 || request->fl_end < 0)
4264 		return -EINVAL;
4265 
4266 	if (IS_GETLK(cmd)) {
4267 		if (state != NULL)
4268 			return nfs4_proc_getlk(state, F_GETLK, request);
4269 		return 0;
4270 	}
4271 
4272 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4273 		return -EINVAL;
4274 
4275 	if (request->fl_type == F_UNLCK) {
4276 		if (state != NULL)
4277 			return nfs4_proc_unlck(state, cmd, request);
4278 		return 0;
4279 	}
4280 
4281 	if (state == NULL)
4282 		return -ENOLCK;
4283 	do {
4284 		status = nfs4_proc_setlk(state, cmd, request);
4285 		if ((status != -EAGAIN) || IS_SETLK(cmd))
4286 			break;
4287 		timeout = nfs4_set_lock_task_retry(timeout);
4288 		status = -ERESTARTSYS;
4289 		if (signalled())
4290 			break;
4291 	} while (status < 0);
4292 	return status;
4293 }
4294 
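/*
 * Called while returning a delegation: locks that were cached locally
 * under the delegation must be re-established on the server before
 * the delegation goes away.
 */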
4295 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4296 {
4297 	struct nfs_server *server = NFS_SERVER(state->inode);
4298 	struct nfs4_exception exception = { };
4299 	int err;
4300 
4301 	err = nfs4_set_lock_state(state, fl);
4302 	if (err != 0)
4303 		goto out;
4304 	do {
4305 		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4306 		switch (err) {
4307 			default:
4308 				printk(KERN_ERR "%s: unhandled error %d.\n",
4309 						__func__, err);
4310 			case 0:
4311 			case -ESTALE:
4312 				goto out;
4313 			case -NFS4ERR_EXPIRED:
4314 			case -NFS4ERR_STALE_CLIENTID:
4315 			case -NFS4ERR_STALE_STATEID:
4316 			case -NFS4ERR_BADSESSION:
4317 			case -NFS4ERR_BADSLOT:
4318 			case -NFS4ERR_BAD_HIGH_SLOT:
4319 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4320 			case -NFS4ERR_DEADSESSION:
4321 				nfs4_schedule_state_recovery(server->nfs_client);
4322 				goto out;
4323 			case -ERESTARTSYS:
4324 				/*
4325 				 * The show must go on: exit, but mark the
4326 				 * stateid as needing recovery.
4327 				 */
4328 			case -NFS4ERR_ADMIN_REVOKED:
4329 			case -NFS4ERR_BAD_STATEID:
4330 			case -NFS4ERR_OPENMODE:
4331 				nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
4332 				err = 0;
4333 				goto out;
4334 			case -EKEYEXPIRED:
4335 				/*
4336 				 * User RPCSEC_GSS context has expired.
4337 				 * We cannot recover this stateid now, so
4338 				 * skip it and allow recovery thread to
4339 				 * proceed.
4340 				 */
4341 				err = 0;
4342 				goto out;
4343 			case -ENOMEM:
4344 			case -NFS4ERR_DENIED:
4345 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
4346 				err = 0;
4347 				goto out;
4348 			case -NFS4ERR_DELAY:
4349 				break;
4350 		}
4351 		err = nfs4_handle_exception(server, err, &exception);
4352 	} while (exception.retry);
4353 out:
4354 	return err;
4355 }
4356 
4357 static void nfs4_release_lockowner_release(void *calldata)
4358 {
4359 	kfree(calldata);
4360 }
4361 
4362 const struct rpc_call_ops nfs4_release_lockowner_ops = {
4363 	.rpc_release = nfs4_release_lockowner_release,
4364 };
4365 
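/*
 * Send a fire-and-forget RELEASE_LOCKOWNER so the server can release
 * the state associated with this lock owner.  The operation only
 * exists in NFSv4.0, hence the minor version check.
 */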
4366 void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
4367 {
4368 	struct nfs_server *server = lsp->ls_state->owner->so_server;
4369 	struct nfs_release_lockowner_args *args;
4370 	struct rpc_message msg = {
4371 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4372 	};
4373 
4374 	if (server->nfs_client->cl_mvops->minor_version != 0)
4375 		return;
4376 	args = kmalloc(sizeof(*args), GFP_NOFS);
4377 	if (!args)
4378 		return;
4379 	args->lock_owner.clientid = server->nfs_client->cl_clientid;
4380 	args->lock_owner.id = lsp->ls_id.id;
4381 	msg.rpc_argp = args;
4382 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
4383 }
4384 
4385 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4386 
4387 int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
4388 		size_t buflen, int flags)
4389 {
4390 	struct inode *inode = dentry->d_inode;
4391 
4392 	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
4393 		return -EOPNOTSUPP;
4394 
4395 	return nfs4_proc_set_acl(inode, buf, buflen);
4396 }
4397 
4398 /* The getxattr man page suggests returning -ENODATA for unknown attributes,
4399  * and that's what we'll do for e.g. user attributes that haven't been set.
4400  * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
4401  * attributes in kernel-managed attribute namespaces. */
4402 ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
4403 		size_t buflen)
4404 {
4405 	struct inode *inode = dentry->d_inode;
4406 
4407 	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
4408 		return -EOPNOTSUPP;
4409 
4410 	return nfs4_proc_get_acl(inode, buf, buflen);
4411 }
4412 
4413 ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
4414 {
4415 	size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
4416 
4417 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4418 		return 0;
4419 	if (buf && buflen < len)
4420 		return -ERANGE;
4421 	if (buf)
4422 		memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
4423 	return len;
4424 }
4425 
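/*
 * The attributes returned for a referral are sparse: fill in enough
 * of them (type, mode, nlink) for the VFS to treat the referral point
 * as a directory.
 */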
4426 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4427 {
4428 	if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
4429 		(fattr->valid & NFS_ATTR_FATTR_FSID) &&
4430 		(fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
4431 		return;
4432 
4433 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4434 		NFS_ATTR_FATTR_NLINK;
4435 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4436 	fattr->nlink = 2;
4437 }
4438 
4439 int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4440 		struct nfs4_fs_locations *fs_locations, struct page *page)
4441 {
4442 	struct nfs_server *server = NFS_SERVER(dir);
4443 	u32 bitmask[2] = {
4444 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4445 		[1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
4446 	};
4447 	struct nfs4_fs_locations_arg args = {
4448 		.dir_fh = NFS_FH(dir),
4449 		.name = name,
4450 		.page = page,
4451 		.bitmask = bitmask,
4452 	};
4453 	struct nfs4_fs_locations_res res = {
4454 		.fs_locations = fs_locations,
4455 	};
4456 	struct rpc_message msg = {
4457 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
4458 		.rpc_argp = &args,
4459 		.rpc_resp = &res,
4460 	};
4461 	int status;
4462 
4463 	dprintk("%s: start\n", __func__);
4464 	nfs_fattr_init(&fs_locations->fattr);
4465 	fs_locations->server = server;
4466 	fs_locations->nlocations = 0;
4467 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
4468 	nfs_fixup_referral_attributes(&fs_locations->fattr);
4469 	dprintk("%s: returned status = %d\n", __func__, status);
4470 	return status;
4471 }
4472 
4473 #ifdef CONFIG_NFS_V4_1
4474 /*
4475  * nfs4_proc_exchange_id()
4476  *
4477  * Since the clientid has expired, all compounds using sessions
4478  * associated with the stale clientid will be returning
4479  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
4480  * be in some phase of session reset.
4481  */
4482 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4483 {
4484 	nfs4_verifier verifier;
4485 	struct nfs41_exchange_id_args args = {
4486 		.client = clp,
4487 		.flags = clp->cl_exchange_flags,
4488 	};
4489 	struct nfs41_exchange_id_res res = {
4490 		.client = clp,
4491 	};
4492 	int status;
4493 	struct rpc_message msg = {
4494 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
4495 		.rpc_argp = &args,
4496 		.rpc_resp = &res,
4497 		.rpc_cred = cred,
4498 	};
4499 	__be32 *p;
4500 
4501 	dprintk("--> %s\n", __func__);
4502 	BUG_ON(clp == NULL);
4503 
4504 	/* Remove server-only flags */
4505 	args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
4506 
4507 	p = (__be32 *)verifier.data;
4508 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
4509 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
4510 	args.verifier = &verifier;
4511 
4512 	while (1) {
4513 		args.id_len = scnprintf(args.id, sizeof(args.id),
4514 					"%s/%s %u",
4515 					clp->cl_ipaddr,
4516 					rpc_peeraddr2str(clp->cl_rpcclient,
4517 							 RPC_DISPLAY_ADDR),
4518 					clp->cl_id_uniquifier);
4519 
4520 		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
4521 
4522 		if (status != -NFS4ERR_CLID_INUSE)
4523 			break;
4524 
4525 		if (signalled())
4526 			break;
4527 
4528 		if (++clp->cl_id_uniquifier == 0)
4529 			break;
4530 	}
4531 
4532 	dprintk("<-- %s status= %d\n", __func__, status);
4533 	return status;
4534 }
4535 
4536 struct nfs4_get_lease_time_data {
4537 	struct nfs4_get_lease_time_args *args;
4538 	struct nfs4_get_lease_time_res *res;
4539 	struct nfs_client *clp;
4540 };
4541 
4542 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
4543 					void *calldata)
4544 {
4545 	int ret;
4546 	struct nfs4_get_lease_time_data *data =
4547 			(struct nfs4_get_lease_time_data *)calldata;
4548 
4549 	dprintk("--> %s\n", __func__);
4550 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4551 	/* Just set up the sequence, do not trigger session recovery
4552 	   since we're invoked from within one */
4553 	ret = nfs41_setup_sequence(data->clp->cl_session,
4554 				   &data->args->la_seq_args,
4555 				   &data->res->lr_seq_res, 0, task);
4556 
4557 	BUG_ON(ret == -EAGAIN);
4558 	rpc_call_start(task);
4559 	dprintk("<-- %s\n", __func__);
4560 }
4561 
4562 /*
4563  * Called from nfs4_state_manager thread for session setup, so don't recover
4564  * from sequence operation or clientid errors.
4565  */
4566 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4567 {
4568 	struct nfs4_get_lease_time_data *data =
4569 			(struct nfs4_get_lease_time_data *)calldata;
4570 
4571 	dprintk("--> %s\n", __func__);
4572 	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
4573 		return;
4574 	switch (task->tk_status) {
4575 	case -NFS4ERR_DELAY:
4576 	case -NFS4ERR_GRACE:
4577 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
4578 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
4579 		task->tk_status = 0;
4580 		nfs_restart_rpc(task, data->clp);
4581 		return;
4582 	}
4583 	dprintk("<-- %s\n", __func__);
4584 }
4585 
4586 struct rpc_call_ops nfs4_get_lease_time_ops = {
4587 	.rpc_call_prepare = nfs4_get_lease_time_prepare,
4588 	.rpc_call_done = nfs4_get_lease_time_done,
4589 };
4590 
4591 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
4592 {
4593 	struct rpc_task *task;
4594 	struct nfs4_get_lease_time_args args;
4595 	struct nfs4_get_lease_time_res res = {
4596 		.lr_fsinfo = fsinfo,
4597 	};
4598 	struct nfs4_get_lease_time_data data = {
4599 		.args = &args,
4600 		.res = &res,
4601 		.clp = clp,
4602 	};
4603 	struct rpc_message msg = {
4604 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
4605 		.rpc_argp = &args,
4606 		.rpc_resp = &res,
4607 	};
4608 	struct rpc_task_setup task_setup = {
4609 		.rpc_client = clp->cl_rpcclient,
4610 		.rpc_message = &msg,
4611 		.callback_ops = &nfs4_get_lease_time_ops,
4612 		.callback_data = &data
4613 	};
4614 	int status;
4615 
4616 	dprintk("--> %s\n", __func__);
4617 	task = rpc_run_task(&task_setup);
4618 
4619 	if (IS_ERR(task))
4620 		status = PTR_ERR(task);
4621 	else {
4622 		status = task->tk_status;
4623 		rpc_put_task(task);
4624 	}
4625 	dprintk("<-- %s return %d\n", __func__, status);
4626 
4627 	return status;
4628 }
4629 
4630 /*
4631  * Reset a slot table
4632  */
4633 static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
4634 				 int ivalue)
4635 {
4636 	struct nfs4_slot *new = NULL;
4637 	int i;
4638 	int ret = 0;
4639 
4640 	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
4641 		max_reqs, tbl->max_slots);
4642 
4643 	/* Does the newly negotiated max_reqs match the existing slot table? */
4644 	if (max_reqs != tbl->max_slots) {
4645 		ret = -ENOMEM;
4646 		new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
4647 			      GFP_NOFS);
4648 		if (!new)
4649 			goto out;
4650 		ret = 0;
4651 		kfree(tbl->slots);
4652 	}
4653 	spin_lock(&tbl->slot_tbl_lock);
4654 	if (new) {
4655 		tbl->slots = new;
4656 		tbl->max_slots = max_reqs;
4657 	}
4658 	for (i = 0; i < tbl->max_slots; ++i)
4659 		tbl->slots[i].seq_nr = ivalue;
4660 	spin_unlock(&tbl->slot_tbl_lock);
4661 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4662 		tbl, tbl->slots, tbl->max_slots);
4663 out:
4664 	dprintk("<-- %s: return %d\n", __func__, ret);
4665 	return ret;
4666 }
4667 
4668 /*
4669  * Reset the forechannel and backchannel slot tables
4670  */
4671 static int nfs4_reset_slot_tables(struct nfs4_session *session)
4672 {
4673 	int status;
4674 
4675 	status = nfs4_reset_slot_table(&session->fc_slot_table,
4676 			session->fc_attrs.max_reqs, 1);
4677 	if (status)
4678 		return status;
4679 
4680 	status = nfs4_reset_slot_table(&session->bc_slot_table,
4681 			session->bc_attrs.max_reqs, 0);
4682 	return status;
4683 }
4684 
4685 /* Destroy the fore and back channel slot tables */
4686 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
4687 {
4688 	if (session->fc_slot_table.slots != NULL) {
4689 		kfree(session->fc_slot_table.slots);
4690 		session->fc_slot_table.slots = NULL;
4691 	}
4692 	if (session->bc_slot_table.slots != NULL) {
4693 		kfree(session->bc_slot_table.slots);
4694 		session->bc_slot_table.slots = NULL;
4695 	}
4696 	return;
4697 }
4698 
4699 /*
4700  * Initialize slot table
4701  */
4702 static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
4703 		int max_slots, int ivalue)
4704 {
4705 	struct nfs4_slot *slot;
4706 	int ret = -ENOMEM;
4707 
4708 	BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
4709 
4710 	dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
4711 
4712 	slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
4713 	if (!slot)
4714 		goto out;
4715 	ret = 0;
4716 
4717 	spin_lock(&tbl->slot_tbl_lock);
4718 	tbl->max_slots = max_slots;
4719 	tbl->slots = slot;
4720 	tbl->highest_used_slotid = -1;  /* no slot is currently used */
4721 	spin_unlock(&tbl->slot_tbl_lock);
4722 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4723 		tbl, tbl->slots, tbl->max_slots);
4724 out:
4725 	dprintk("<-- %s: return %d\n", __func__, ret);
4726 	return ret;
4727 }
4728 
4729 /*
4730  * Initialize the forechannel and backchannel tables
4731  */
4732 static int nfs4_init_slot_tables(struct nfs4_session *session)
4733 {
4734 	struct nfs4_slot_table *tbl;
4735 	int status = 0;
4736 
4737 	tbl = &session->fc_slot_table;
4738 	if (tbl->slots == NULL) {
4739 		status = nfs4_init_slot_table(tbl,
4740 				session->fc_attrs.max_reqs, 1);
4741 		if (status)
4742 			return status;
4743 	}
4744 
4745 	tbl = &session->bc_slot_table;
4746 	if (tbl->slots == NULL) {
4747 		status = nfs4_init_slot_table(tbl,
4748 				session->bc_attrs.max_reqs, 0);
4749 		if (status)
4750 			nfs4_destroy_slot_tables(session);
4751 	}
4752 
4753 	return status;
4754 }
4755 
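/*
 * Allocate an NFSv4.1 session and initialize the fore and back
 * channel slot table locks and wait queues.  The slot tables
 * themselves are allocated later, once CREATE_SESSION has negotiated
 * their sizes.
 */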
4756 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
4757 {
4758 	struct nfs4_session *session;
4759 	struct nfs4_slot_table *tbl;
4760 
4761 	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
4762 	if (!session)
4763 		return NULL;
4764 
4765 	init_completion(&session->complete);
4766 
4767 	tbl = &session->fc_slot_table;
4768 	tbl->highest_used_slotid = -1;
4769 	spin_lock_init(&tbl->slot_tbl_lock);
4770 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
4771 
4772 	tbl = &session->bc_slot_table;
4773 	tbl->highest_used_slotid = -1;
4774 	spin_lock_init(&tbl->slot_tbl_lock);
4775 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
4776 
4777 	session->session_state = 1<<NFS4_SESSION_INITING;
4778 
4779 	session->clp = clp;
4780 	return session;
4781 }
4782 
4783 void nfs4_destroy_session(struct nfs4_session *session)
4784 {
4785 	nfs4_proc_destroy_session(session);
4786 	dprintk("%s Destroy backchannel for xprt %p\n",
4787 		__func__, session->clp->cl_rpcclient->cl_xprt);
4788 	xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
4789 				NFS41_BC_MIN_CALLBACKS);
4790 	nfs4_destroy_slot_tables(session);
4791 	kfree(session);
4792 }
4793 
4794 /*
4795  * Initialize the values to be used by the client in CREATE_SESSION.
4796  * If nfs4_init_session() has already set the fore channel request and
4797  * response sizes, use them.
4798  *
4799  * Set the back channel max_resp_sz_cached to zero to force the client to
4800  * always set csa_cachethis to FALSE because the current implementation
4801  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
4802  */
4803 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
4804 {
4805 	struct nfs4_session *session = args->client->cl_session;
4806 	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
4807 		     mxresp_sz = session->fc_attrs.max_resp_sz;
4808 
4809 	if (mxrqst_sz == 0)
4810 		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
4811 	if (mxresp_sz == 0)
4812 		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
4813 	/* Fore channel attributes */
4814 	args->fc_attrs.headerpadsz = 0;
4815 	args->fc_attrs.max_rqst_sz = mxrqst_sz;
4816 	args->fc_attrs.max_resp_sz = mxresp_sz;
4817 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
4818 	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
4819 
4820 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
4821 		"max_ops=%u max_reqs=%u\n",
4822 		__func__,
4823 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
4824 		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
4825 
4826 	/* Back channel attributes */
4827 	args->bc_attrs.headerpadsz = 0;
4828 	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
4829 	args->bc_attrs.max_resp_sz = PAGE_SIZE;
4830 	args->bc_attrs.max_resp_sz_cached = 0;
4831 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
4832 	args->bc_attrs.max_reqs = 1;
4833 
4834 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
4835 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
4836 		__func__,
4837 		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
4838 		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
4839 		args->bc_attrs.max_reqs);
4840 }
4841 
4842 static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
4843 {
4844 	if (rcvd <= sent)
4845 		return 0;
4846 	printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
4847 		"sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
4848 	return -EINVAL;
4849 }
4850 
4851 #define _verify_fore_channel_attr(_name_) \
4852 	_verify_channel_attr("fore", #_name_, \
4853 			     args->fc_attrs._name_, \
4854 			     session->fc_attrs._name_)
4855 
4856 #define _verify_back_channel_attr(_name_) \
4857 	_verify_channel_attr("back", #_name_, \
4858 			     args->bc_attrs._name_, \
4859 			     session->bc_attrs._name_)
4860 
4861 /*
4862  * The server is not allowed to increase the fore channel header pad size,
4863  * maximum response size, or maximum number of operations.
4864  *
4865  * The back channel attributes are only negotiated down: we send what the
4866  * (back channel) server insists upon.
4867  */
4868 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
4869 				     struct nfs4_session *session)
4870 {
4871 	int ret = 0;
4872 
4873 	ret |= _verify_fore_channel_attr(headerpadsz);
4874 	ret |= _verify_fore_channel_attr(max_resp_sz);
4875 	ret |= _verify_fore_channel_attr(max_ops);
4876 
4877 	ret |= _verify_back_channel_attr(headerpadsz);
4878 	ret |= _verify_back_channel_attr(max_rqst_sz);
4879 	ret |= _verify_back_channel_attr(max_resp_sz);
4880 	ret |= _verify_back_channel_attr(max_resp_sz_cached);
4881 	ret |= _verify_back_channel_attr(max_ops);
4882 	ret |= _verify_back_channel_attr(max_reqs);
4883 
4884 	return ret;
4885 }
4886 
4887 static int _nfs4_proc_create_session(struct nfs_client *clp)
4888 {
4889 	struct nfs4_session *session = clp->cl_session;
4890 	struct nfs41_create_session_args args = {
4891 		.client = clp,
4892 		.cb_program = NFS4_CALLBACK,
4893 	};
4894 	struct nfs41_create_session_res res = {
4895 		.client = clp,
4896 	};
4897 	struct rpc_message msg = {
4898 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
4899 		.rpc_argp = &args,
4900 		.rpc_resp = &res,
4901 	};
4902 	int status;
4903 
4904 	nfs4_init_channel_attrs(&args);
4905 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
4906 
4907 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
4908 
4909 	if (!status)
4910 		/* Verify the session's negotiated channel_attrs values */
4911 		status = nfs4_verify_channel_attrs(&args, session);
4912 	if (!status) {
4913 		/* Increment the clientid slot sequence id */
4914 		clp->cl_seqid++;
4915 	}
4916 
4917 	return status;
4918 }
4919 
4920 /*
4921  * Issues a CREATE_SESSION operation to the server.
4922  * It is the responsibility of the caller to verify the session is
4923  * expired before calling this routine.
4924  */
4925 int nfs4_proc_create_session(struct nfs_client *clp)
4926 {
4927 	int status;
4928 	unsigned *ptr;
4929 	struct nfs4_session *session = clp->cl_session;
4930 
4931 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
4932 
4933 	status = _nfs4_proc_create_session(clp);
4934 	if (status)
4935 		goto out;
4936 
4937 	/* Init and reset the fore channel */
4938 	status = nfs4_init_slot_tables(session);
4939 	dprintk("slot table initialization returned %d\n", status);
4940 	if (status)
4941 		goto out;
4942 	status = nfs4_reset_slot_tables(session);
4943 	dprintk("slot table reset returned %d\n", status);
4944 	if (status)
4945 		goto out;
4946 
4947 	ptr = (unsigned *)&session->sess_id.data[0];
4948 	dprintk("%s client seqid %d sessionid %u:%u:%u:%u\n", __func__,
4949 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
4950 out:
4951 	dprintk("<-- %s\n", __func__);
4952 	return status;
4953 }
4954 
4955 /*
4956  * Issue the over-the-wire RPC DESTROY_SESSION.
4957  * The caller must serialize access to this routine.
4958  */
4959 int nfs4_proc_destroy_session(struct nfs4_session *session)
4960 {
4961 	int status = 0;
4962 	struct rpc_message msg;
4963 
4964 	dprintk("--> nfs4_proc_destroy_session\n");
4965 
4966 	/* session is still being setup */
4967 	if (session->clp->cl_cons_state != NFS_CS_READY)
4968 		return status;
4969 
4970 	msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
4971 	msg.rpc_argp = session;
4972 	msg.rpc_resp = NULL;
4973 	msg.rpc_cred = NULL;
4974 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
4975 
4976 	if (status)
4977 		printk(KERN_WARNING
4978 			"Got error %d from the server on DESTROY_SESSION. "
4979 			"Session has been destroyed regardless...\n", status);
4980 
4981 	dprintk("<-- nfs4_proc_destroy_session\n");
4982 	return status;
4983 }
4984 
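/*
 * Set the fore channel maximum request and response sizes from the
 * mount's wsize/rsize plus the RPC overhead, then establish the lease
 * and wait for the client to become ready.
 */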
4985 int nfs4_init_session(struct nfs_server *server)
4986 {
4987 	struct nfs_client *clp = server->nfs_client;
4988 	struct nfs4_session *session;
4989 	unsigned int rsize, wsize;
4990 	int ret;
4991 
4992 	if (!nfs4_has_session(clp))
4993 		return 0;
4994 
4995 	session = clp->cl_session;
4996 	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
4997 		return 0;
4998 
4999 	rsize = server->rsize;
5000 	if (rsize == 0)
5001 		rsize = NFS_MAX_FILE_IO_SIZE;
5002 	wsize = server->wsize;
5003 	if (wsize == 0)
5004 		wsize = NFS_MAX_FILE_IO_SIZE;
5005 
5006 	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5007 	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5008 
5009 	ret = nfs4_recover_expired_lease(server);
5010 	if (!ret)
5011 		ret = nfs4_check_client_ready(clp);
5012 	return ret;
5013 }
5014 
5015 /*
5016  * Renew the cl_session lease.
5017  */
5018 struct nfs4_sequence_data {
5019 	struct nfs_client *clp;
5020 	struct nfs4_sequence_args args;
5021 	struct nfs4_sequence_res res;
5022 };
5023 
5024 static void nfs41_sequence_release(void *data)
5025 {
5026 	struct nfs4_sequence_data *calldata = data;
5027 	struct nfs_client *clp = calldata->clp;
5028 
5029 	if (atomic_read(&clp->cl_count) > 1)
5030 		nfs4_schedule_state_renewal(clp);
5031 	nfs_put_client(clp);
5032 	kfree(calldata);
5033 }
5034 
5035 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5036 {
5037 	switch (task->tk_status) {
5038 	case -NFS4ERR_DELAY:
5039 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5040 		return -EAGAIN;
5041 	default:
5042 		nfs4_schedule_state_recovery(clp);
5043 	}
5044 	return 0;
5045 }
5046 
5047 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5048 {
5049 	struct nfs4_sequence_data *calldata = data;
5050 	struct nfs_client *clp = calldata->clp;
5051 
5052 	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5053 		return;
5054 
5055 	if (task->tk_status < 0) {
5056 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
5057 		if (atomic_read(&clp->cl_count) == 1)
5058 			goto out;
5059 
5060 		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5061 			rpc_restart_call_prepare(task);
5062 			return;
5063 		}
5064 	}
5065 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5066 out:
5067 	dprintk("<-- %s\n", __func__);
5068 }
5069 
5070 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5071 {
5072 	struct nfs4_sequence_data *calldata = data;
5073 	struct nfs_client *clp = calldata->clp;
5074 	struct nfs4_sequence_args *args;
5075 	struct nfs4_sequence_res *res;
5076 
5077 	args = task->tk_msg.rpc_argp;
5078 	res = task->tk_msg.rpc_resp;
5079 
5080 	if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
5081 		return;
5082 	rpc_call_start(task);
5083 }
5084 
5085 static const struct rpc_call_ops nfs41_sequence_ops = {
5086 	.rpc_call_done = nfs41_sequence_call_done,
5087 	.rpc_call_prepare = nfs41_sequence_prepare,
5088 	.rpc_release = nfs41_sequence_release,
5089 };
5090 
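/*
 * Send a stand-alone SEQUENCE operation to keep the session lease
 * alive.  A reference to the nfs_client is held for the lifetime of
 * the task and dropped in nfs41_sequence_release().
 */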
5091 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5092 {
5093 	struct nfs4_sequence_data *calldata;
5094 	struct rpc_message msg = {
5095 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5096 		.rpc_cred = cred,
5097 	};
5098 	struct rpc_task_setup task_setup_data = {
5099 		.rpc_client = clp->cl_rpcclient,
5100 		.rpc_message = &msg,
5101 		.callback_ops = &nfs41_sequence_ops,
5102 		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5103 	};
5104 
5105 	if (!atomic_inc_not_zero(&clp->cl_count))
5106 		return ERR_PTR(-EIO);
5107 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5108 	if (calldata == NULL) {
5109 		nfs_put_client(clp);
5110 		return ERR_PTR(-ENOMEM);
5111 	}
5112 	msg.rpc_argp = &calldata->args;
5113 	msg.rpc_resp = &calldata->res;
5114 	calldata->clp = clp;
5115 	task_setup_data.callback_data = calldata;
5116 
5117 	return rpc_run_task(&task_setup_data);
5118 }
5119 
5120 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5121 {
5122 	struct rpc_task *task;
5123 	int ret = 0;
5124 
5125 	task = _nfs41_proc_sequence(clp, cred);
5126 	if (IS_ERR(task))
5127 		ret = PTR_ERR(task);
5128 	else
5129 		rpc_put_task(task);
5130 	dprintk("<-- %s status=%d\n", __func__, ret);
5131 	return ret;
5132 }
5133 
5134 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5135 {
5136 	struct rpc_task *task;
5137 	int ret;
5138 
5139 	task = _nfs41_proc_sequence(clp, cred);
5140 	if (IS_ERR(task)) {
5141 		ret = PTR_ERR(task);
5142 		goto out;
5143 	}
5144 	ret = rpc_wait_for_completion_task(task);
5145 	if (!ret)
5146 		ret = task->tk_status;
5147 	rpc_put_task(task);
5148 out:
5149 	dprintk("<-- %s status=%d\n", __func__, ret);
5150 	return ret;
5151 }
5152 
5153 struct nfs4_reclaim_complete_data {
5154 	struct nfs_client *clp;
5155 	struct nfs41_reclaim_complete_args arg;
5156 	struct nfs41_reclaim_complete_res res;
5157 };
5158 
5159 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5160 {
5161 	struct nfs4_reclaim_complete_data *calldata = data;
5162 
5163 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5164 	if (nfs41_setup_sequence(calldata->clp->cl_session,
5165 				&calldata->arg.seq_args,
5166 				&calldata->res.seq_res, 0, task))
5167 		return;
5168 
5169 	rpc_call_start(task);
5170 }
5171 
5172 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5173 {
5174 	switch (task->tk_status) {
5175 	case 0:
5176 	case -NFS4ERR_COMPLETE_ALREADY:
5177 	case -NFS4ERR_WRONG_CRED: /* What to do here? */
5178 		break;
5179 	case -NFS4ERR_DELAY:
5180 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5181 		return -EAGAIN;
5182 	default:
5183 		nfs4_schedule_state_recovery(clp);
5184 	}
5185 	return 0;
5186 }
5187 
5188 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5189 {
5190 	struct nfs4_reclaim_complete_data *calldata = data;
5191 	struct nfs_client *clp = calldata->clp;
5192 	struct nfs4_sequence_res *res = &calldata->res.seq_res;
5193 
5194 	dprintk("--> %s\n", __func__);
5195 	if (!nfs41_sequence_done(task, res))
5196 		return;
5197 
5198 	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5199 		rpc_restart_call_prepare(task);
5200 		return;
5201 	}
5202 	dprintk("<-- %s\n", __func__);
5203 }
5204 
5205 static void nfs4_free_reclaim_complete_data(void *data)
5206 {
5207 	struct nfs4_reclaim_complete_data *calldata = data;
5208 
5209 	kfree(calldata);
5210 }
5211 
5212 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
5213 	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
5214 	.rpc_call_done = nfs4_reclaim_complete_done,
5215 	.rpc_release = nfs4_free_reclaim_complete_data,
5216 };
5217 
5218 /*
5219  * Issue a global reclaim complete.
5220  */
5221 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5222 {
5223 	struct nfs4_reclaim_complete_data *calldata;
5224 	struct rpc_task *task;
5225 	struct rpc_message msg = {
5226 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5227 	};
5228 	struct rpc_task_setup task_setup_data = {
5229 		.rpc_client = clp->cl_rpcclient,
5230 		.rpc_message = &msg,
5231 		.callback_ops = &nfs4_reclaim_complete_call_ops,
5232 		.flags = RPC_TASK_ASYNC,
5233 	};
5234 	int status = -ENOMEM;
5235 
5236 	dprintk("--> %s\n", __func__);
5237 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5238 	if (calldata == NULL)
5239 		goto out;
5240 	calldata->clp = clp;
5241 	calldata->arg.one_fs = 0;
5242 
5243 	msg.rpc_argp = &calldata->arg;
5244 	msg.rpc_resp = &calldata->res;
5245 	task_setup_data.callback_data = calldata;
5246 	task = rpc_run_task(&task_setup_data);
5247 	if (IS_ERR(task)) {
5248 		status = PTR_ERR(task);
5249 		goto out;
5250 	}
5251 	rpc_put_task(task);
5252 	return 0;
5253 out:
5254 	dprintk("<-- %s status=%d\n", __func__, status);
5255 	return status;
5256 }
5257 #endif /* CONFIG_NFS_V4_1 */
5258 
5259 struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
5260 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
5261 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
5262 	.recover_open	= nfs4_open_reclaim,
5263 	.recover_lock	= nfs4_lock_reclaim,
5264 	.establish_clid = nfs4_init_clientid,
5265 	.get_clid_cred	= nfs4_get_setclientid_cred,
5266 };
5267 
5268 #if defined(CONFIG_NFS_V4_1)
5269 struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
5270 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
5271 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
5272 	.recover_open	= nfs4_open_reclaim,
5273 	.recover_lock	= nfs4_lock_reclaim,
5274 	.establish_clid = nfs41_init_clientid,
5275 	.get_clid_cred	= nfs4_get_exchange_id_cred,
5276 	.reclaim_complete = nfs41_proc_reclaim_complete,
5277 };
5278 #endif /* CONFIG_NFS_V4_1 */
5279 
5280 struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
5281 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
5282 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
5283 	.recover_open	= nfs4_open_expired,
5284 	.recover_lock	= nfs4_lock_expired,
5285 	.establish_clid = nfs4_init_clientid,
5286 	.get_clid_cred	= nfs4_get_setclientid_cred,
5287 };
5288 
5289 #if defined(CONFIG_NFS_V4_1)
5290 struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
5291 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
5292 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
5293 	.recover_open	= nfs4_open_expired,
5294 	.recover_lock	= nfs4_lock_expired,
5295 	.establish_clid = nfs41_init_clientid,
5296 	.get_clid_cred	= nfs4_get_exchange_id_cred,
5297 };
5298 #endif /* CONFIG_NFS_V4_1 */
5299 
5300 struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
5301 	.sched_state_renewal = nfs4_proc_async_renew,
5302 	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
5303 	.renew_lease = nfs4_proc_renew,
5304 };
5305 
5306 #if defined(CONFIG_NFS_V4_1)
5307 struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
5308 	.sched_state_renewal = nfs41_proc_async_sequence,
5309 	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
5310 	.renew_lease = nfs4_proc_sequence,
5311 };
5312 #endif
5313 
5314 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
5315 	.minor_version = 0,
5316 	.call_sync = _nfs4_call_sync,
5317 	.validate_stateid = nfs4_validate_delegation_stateid,
5318 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
5319 	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
5320 	.state_renewal_ops = &nfs40_state_renewal_ops,
5321 };
5322 
5323 #if defined(CONFIG_NFS_V4_1)
5324 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
5325 	.minor_version = 1,
5326 	.call_sync = _nfs4_call_sync_session,
5327 	.validate_stateid = nfs41_validate_delegation_stateid,
5328 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
5329 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
5330 	.state_renewal_ops = &nfs41_state_renewal_ops,
5331 };
5332 #endif
5333 
5334 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
5335 	[0] = &nfs_v4_0_minor_ops,
5336 #if defined(CONFIG_NFS_V4_1)
5337 	[1] = &nfs_v4_1_minor_ops,
5338 #endif
5339 };
5340 
5341 static const struct inode_operations nfs4_file_inode_operations = {
5342 	.permission	= nfs_permission,
5343 	.getattr	= nfs_getattr,
5344 	.setattr	= nfs_setattr,
5345 	.getxattr	= nfs4_getxattr,
5346 	.setxattr	= nfs4_setxattr,
5347 	.listxattr	= nfs4_listxattr,
5348 };
5349 
5350 const struct nfs_rpc_ops nfs_v4_clientops = {
5351 	.version	= 4,			/* protocol version */
5352 	.dentry_ops	= &nfs4_dentry_operations,
5353 	.dir_inode_ops	= &nfs4_dir_inode_operations,
5354 	.file_inode_ops	= &nfs4_file_inode_operations,
5355 	.getroot	= nfs4_proc_get_root,
5356 	.getattr	= nfs4_proc_getattr,
5357 	.setattr	= nfs4_proc_setattr,
5358 	.lookupfh	= nfs4_proc_lookupfh,
5359 	.lookup		= nfs4_proc_lookup,
5360 	.access		= nfs4_proc_access,
5361 	.readlink	= nfs4_proc_readlink,
5362 	.create		= nfs4_proc_create,
5363 	.remove		= nfs4_proc_remove,
5364 	.unlink_setup	= nfs4_proc_unlink_setup,
5365 	.unlink_done	= nfs4_proc_unlink_done,
5366 	.rename		= nfs4_proc_rename,
5367 	.rename_setup	= nfs4_proc_rename_setup,
5368 	.rename_done	= nfs4_proc_rename_done,
5369 	.link		= nfs4_proc_link,
5370 	.symlink	= nfs4_proc_symlink,
5371 	.mkdir		= nfs4_proc_mkdir,
5372 	.rmdir		= nfs4_proc_remove,
5373 	.readdir	= nfs4_proc_readdir,
5374 	.mknod		= nfs4_proc_mknod,
5375 	.statfs		= nfs4_proc_statfs,
5376 	.fsinfo		= nfs4_proc_fsinfo,
5377 	.pathconf	= nfs4_proc_pathconf,
5378 	.set_capabilities = nfs4_server_capabilities,
5379 	.decode_dirent	= nfs4_decode_dirent,
5380 	.read_setup	= nfs4_proc_read_setup,
5381 	.read_done	= nfs4_read_done,
5382 	.write_setup	= nfs4_proc_write_setup,
5383 	.write_done	= nfs4_write_done,
5384 	.commit_setup	= nfs4_proc_commit_setup,
5385 	.commit_done	= nfs4_commit_done,
5386 	.lock		= nfs4_proc_lock,
5387 	.clear_acl_cache = nfs4_zap_acl_attr,
5388 	.close_context  = nfs4_close_context,
5389 	.open_context	= nfs4_atomic_open,
5390 };
5391 
5392 /*
5393  * Local variables:
5394  *  c-basic-offset: 8
5395  * End:
5396  */
5397