xref: /openbmc/linux/fs/nfs/nfs4state.c (revision 4dc7ccf7)
1 /*
2  *  fs/nfs/nfs4state.c
3  *
4  *  Client-side XDR for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  1. Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *  2. Redistributions in binary form must reproduce the above copyright
18  *     notice, this list of conditions and the following disclaimer in the
19  *     documentation and/or other materials provided with the distribution.
20  *  3. Neither the name of the University nor the names of its
21  *     contributors may be used to endorse or promote products derived
22  *     from this software without specific prior written permission.
23  *
24  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Implementation of the NFSv4 state model.  For the time being,
37  * this is minimal, but will be made much more complex in a
38  * subsequent patch.
39  */
40 
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/smp_lock.h>
44 #include <linux/nfs_fs.h>
45 #include <linux/nfs_idmap.h>
46 #include <linux/kthread.h>
47 #include <linux/module.h>
48 #include <linux/random.h>
49 #include <linux/workqueue.h>
50 #include <linux/bitops.h>
51 
52 #include "nfs4_fs.h"
53 #include "callback.h"
54 #include "delegation.h"
55 #include "internal.h"
56 
57 #define OPENOWNER_POOL_SIZE	8
58 
59 const nfs4_stateid zero_stateid;
60 
61 static LIST_HEAD(nfs4_clientid_list);
62 
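/*
 * Establish a clientid with the server: send SETCLIENTID advertising the
 * local callback port (IPv4 or IPv6), confirm it with SETCLIENTID_CONFIRM,
 * and start lease renewal on success.
 */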
63 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
64 {
65 	unsigned short port;
66 	int status;
67 
68 	port = nfs_callback_tcpport;
69 	if (clp->cl_addr.ss_family == AF_INET6)
70 		port = nfs_callback_tcpport6;
71 
72 	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
73 	if (status == 0)
74 		status = nfs4_proc_setclientid_confirm(clp, cred);
75 	if (status == 0)
76 		nfs4_schedule_state_renewal(clp);
77 	return status;
78 }
79 
80 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
81 {
82 	struct rpc_cred *cred = NULL;
83 
84 	if (clp->cl_machine_cred != NULL)
85 		cred = get_rpccred(clp->cl_machine_cred);
86 	return cred;
87 }
88 
89 static void nfs4_clear_machine_cred(struct nfs_client *clp)
90 {
91 	struct rpc_cred *cred;
92 
93 	spin_lock(&clp->cl_lock);
94 	cred = clp->cl_machine_cred;
95 	clp->cl_machine_cred = NULL;
96 	spin_unlock(&clp->cl_lock);
97 	if (cred != NULL)
98 		put_rpccred(cred);
99 }
100 
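/*
 * Pick a credential for lease renewal: the cred of the first state owner
 * that still holds open state.  Caller must hold clp->cl_lock.
 */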
101 struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
102 {
103 	struct nfs4_state_owner *sp;
104 	struct rb_node *pos;
105 	struct rpc_cred *cred = NULL;
106 
107 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
108 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
109 		if (list_empty(&sp->so_states))
110 			continue;
111 		cred = get_rpccred(sp->so_cred);
112 		break;
113 	}
114 	return cred;
115 }
116 
117 #if defined(CONFIG_NFS_V4_1)
118 
119 static int nfs41_setup_state_renewal(struct nfs_client *clp)
120 {
121 	int status;
122 	struct nfs_fsinfo fsinfo;
123 
124 	status = nfs4_proc_get_lease_time(clp, &fsinfo);
125 	if (status == 0) {
126 		/* Update lease time and schedule renewal */
127 		spin_lock(&clp->cl_lock);
128 		clp->cl_lease_time = fsinfo.lease_time * HZ;
129 		clp->cl_last_renewal = jiffies;
130 		spin_unlock(&clp->cl_lock);
131 
132 		nfs4_schedule_state_renewal(clp);
133 	}
134 
135 	return status;
136 }
137 
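/*
 * Stop draining the session: clear NFS4CLNT_SESSION_DRAINING and wake up
 * to max_slots waiters on the forechannel slot table, marking each woken
 * task as privileged.
 */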
138 static void nfs4_end_drain_session(struct nfs_client *clp)
139 {
140 	struct nfs4_session *ses = clp->cl_session;
141 	int max_slots;
142 
143 	if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
144 		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
145 		max_slots = ses->fc_slot_table.max_slots;
146 		while (max_slots--) {
147 			struct rpc_task *task;
148 
149 			task = rpc_wake_up_next(&ses->fc_slot_table.
150 						slot_tbl_waitq);
151 			if (!task)
152 				break;
153 			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
154 		}
155 		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
156 	}
157 }
158 
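/*
 * Begin draining the session: set NFS4CLNT_SESSION_DRAINING and, if any
 * forechannel slot is still in use, wait for the final outstanding request
 * to signal ses->complete before returning.
 */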
159 static int nfs4_begin_drain_session(struct nfs_client *clp)
160 {
161 	struct nfs4_session *ses = clp->cl_session;
162 	struct nfs4_slot_table *tbl = &ses->fc_slot_table;
163 
164 	spin_lock(&tbl->slot_tbl_lock);
165 	set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
166 	if (tbl->highest_used_slotid != -1) {
167 		INIT_COMPLETION(ses->complete);
168 		spin_unlock(&tbl->slot_tbl_lock);
169 		return wait_for_completion_interruptible(&ses->complete);
170 	}
171 	spin_unlock(&tbl->slot_tbl_lock);
172 	return 0;
173 }
174 
175 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
176 {
177 	int status;
178 
179 	nfs4_begin_drain_session(clp);
180 	status = nfs4_proc_exchange_id(clp, cred);
181 	if (status != 0)
182 		goto out;
183 	status = nfs4_proc_create_session(clp);
184 	if (status != 0)
185 		goto out;
186 	nfs41_setup_state_renewal(clp);
187 	nfs_mark_client_ready(clp, NFS_CS_READY);
188 out:
189 	return status;
190 }
191 
192 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
193 {
194 	struct rpc_cred *cred;
195 
196 	spin_lock(&clp->cl_lock);
197 	cred = nfs4_get_machine_cred_locked(clp);
198 	spin_unlock(&clp->cl_lock);
199 	return cred;
200 }
201 
202 #endif /* CONFIG_NFS_V4_1 */
203 
204 struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
205 {
206 	struct nfs4_state_owner *sp;
207 	struct rb_node *pos;
208 	struct rpc_cred *cred;
209 
210 	spin_lock(&clp->cl_lock);
211 	cred = nfs4_get_machine_cred_locked(clp);
212 	if (cred != NULL)
213 		goto out;
214 	pos = rb_first(&clp->cl_state_owners);
215 	if (pos != NULL) {
216 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
217 		cred = get_rpccred(sp->so_cred);
218 	}
219 out:
220 	spin_unlock(&clp->cl_lock);
221 	return cred;
222 }
223 
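/*
 * Allocate a unique id of at most 'maxbits' bits and no smaller than
 * 'minval', and insert it into the given rb-tree.  A random starting value
 * keeps the distribution flat; on collision the id is probed linearly,
 * wrapping back to minval, until an unused value is found.
 */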
224 static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
225 		__u64 minval, int maxbits)
226 {
227 	struct rb_node **p, *parent;
228 	struct nfs_unique_id *pos;
229 	__u64 mask = ~0ULL;
230 
231 	if (maxbits < 64)
232 		mask = (1ULL << maxbits) - 1ULL;
233 
234 	/* Ensure distribution is more or less flat */
235 	get_random_bytes(&new->id, sizeof(new->id));
236 	new->id &= mask;
237 	if (new->id < minval)
238 		new->id += minval;
239 retry:
240 	p = &root->rb_node;
241 	parent = NULL;
242 
243 	while (*p != NULL) {
244 		parent = *p;
245 		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
246 
247 		if (new->id < pos->id)
248 			p = &(*p)->rb_left;
249 		else if (new->id > pos->id)
250 			p = &(*p)->rb_right;
251 		else
252 			goto id_exists;
253 	}
254 	rb_link_node(&new->rb_node, parent, p);
255 	rb_insert_color(&new->rb_node, root);
256 	return;
257 id_exists:
258 	for (;;) {
259 		new->id++;
260 		if (new->id < minval || (new->id & mask) != new->id) {
261 			new->id = minval;
262 			break;
263 		}
264 		parent = rb_next(parent);
265 		if (parent == NULL)
266 			break;
267 		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
268 		if (new->id < pos->id)
269 			break;
270 	}
271 	goto retry;
272 }
273 
274 static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
275 {
276 	rb_erase(&id->rb_node, root);
277 }
278 
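/*
 * Look up an existing state owner in the client's cl_state_owners rb-tree,
 * keyed on (server, cred).  A reference is taken on a match.  Caller must
 * hold clp->cl_lock.
 */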
279 static struct nfs4_state_owner *
280 nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
281 {
282 	struct nfs_client *clp = server->nfs_client;
283 	struct rb_node **p = &clp->cl_state_owners.rb_node,
284 		       *parent = NULL;
285 	struct nfs4_state_owner *sp, *res = NULL;
286 
287 	while (*p != NULL) {
288 		parent = *p;
289 		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
290 
291 		if (server < sp->so_server) {
292 			p = &parent->rb_left;
293 			continue;
294 		}
295 		if (server > sp->so_server) {
296 			p = &parent->rb_right;
297 			continue;
298 		}
299 		if (cred < sp->so_cred)
300 			p = &parent->rb_left;
301 		else if (cred > sp->so_cred)
302 			p = &parent->rb_right;
303 		else {
304 			atomic_inc(&sp->so_count);
305 			res = sp;
306 			break;
307 		}
308 	}
309 	return res;
310 }
311 
312 static struct nfs4_state_owner *
313 nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
314 {
315 	struct rb_node **p = &clp->cl_state_owners.rb_node,
316 		       *parent = NULL;
317 	struct nfs4_state_owner *sp;
318 
319 	while (*p != NULL) {
320 		parent = *p;
321 		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
322 
323 		if (new->so_server < sp->so_server) {
324 			p = &parent->rb_left;
325 			continue;
326 		}
327 		if (new->so_server > sp->so_server) {
328 			p = &parent->rb_right;
329 			continue;
330 		}
331 		if (new->so_cred < sp->so_cred)
332 			p = &parent->rb_left;
333 		else if (new->so_cred > sp->so_cred)
334 			p = &parent->rb_right;
335 		else {
336 			atomic_inc(&sp->so_count);
337 			return sp;
338 		}
339 	}
340 	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
341 	rb_link_node(&new->so_client_node, parent, p);
342 	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
343 	return new;
344 }
345 
346 static void
347 nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
348 {
349 	if (!RB_EMPTY_NODE(&sp->so_client_node))
350 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
351 	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
352 }
353 
354 /*
355  * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
356  * create a new state_owner.
357  *
358  */
359 static struct nfs4_state_owner *
360 nfs4_alloc_state_owner(void)
361 {
362 	struct nfs4_state_owner *sp;
363 
364 	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
365 	if (!sp)
366 		return NULL;
367 	spin_lock_init(&sp->so_lock);
368 	INIT_LIST_HEAD(&sp->so_states);
369 	INIT_LIST_HEAD(&sp->so_delegations);
370 	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
371 	sp->so_seqid.sequence = &sp->so_sequence;
372 	spin_lock_init(&sp->so_sequence.lock);
373 	INIT_LIST_HEAD(&sp->so_sequence.list);
374 	atomic_set(&sp->so_count, 1);
375 	return sp;
376 }
377 
378 static void
379 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
380 {
381 	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
382 		struct nfs_client *clp = sp->so_client;
383 
384 		spin_lock(&clp->cl_lock);
385 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
386 		RB_CLEAR_NODE(&sp->so_client_node);
387 		spin_unlock(&clp->cl_lock);
388 	}
389 }
390 
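/*
 * Find or create the state owner for this (server, cred) pair.  The new
 * owner is allocated outside the lock and the rb-tree is re-checked under
 * clp->cl_lock; if another thread raced us, the duplicate is freed.
 */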
391 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
392 {
393 	struct nfs_client *clp = server->nfs_client;
394 	struct nfs4_state_owner *sp, *new;
395 
396 	spin_lock(&clp->cl_lock);
397 	sp = nfs4_find_state_owner(server, cred);
398 	spin_unlock(&clp->cl_lock);
399 	if (sp != NULL)
400 		return sp;
401 	new = nfs4_alloc_state_owner();
402 	if (new == NULL)
403 		return NULL;
404 	new->so_client = clp;
405 	new->so_server = server;
406 	new->so_cred = cred;
407 	spin_lock(&clp->cl_lock);
408 	sp = nfs4_insert_state_owner(clp, new);
409 	spin_unlock(&clp->cl_lock);
410 	if (sp == new)
411 		get_rpccred(cred);
412 	else {
413 		rpc_destroy_wait_queue(&new->so_sequence.wait);
414 		kfree(new);
415 	}
416 	return sp;
417 }
418 
419 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
420 {
421 	struct nfs_client *clp = sp->so_client;
422 	struct rpc_cred *cred = sp->so_cred;
423 
424 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
425 		return;
426 	nfs4_remove_state_owner(clp, sp);
427 	spin_unlock(&clp->cl_lock);
428 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
429 	put_rpccred(cred);
430 	kfree(sp);
431 }
432 
433 static struct nfs4_state *
434 nfs4_alloc_open_state(void)
435 {
436 	struct nfs4_state *state;
437 
438 	state = kzalloc(sizeof(*state), GFP_KERNEL);
439 	if (!state)
440 		return NULL;
441 	atomic_set(&state->count, 1);
442 	INIT_LIST_HEAD(&state->lock_states);
443 	spin_lock_init(&state->state_lock);
444 	seqlock_init(&state->seqlock);
445 	return state;
446 }
447 
448 void
449 nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
450 {
451 	if (state->state == fmode)
452 		return;
453 	/* NB! List reordering - see the reclaim code for why.  */
454 	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
455 		if (fmode & FMODE_WRITE)
456 			list_move(&state->open_states, &state->owner->so_states);
457 		else
458 			list_move_tail(&state->open_states, &state->owner->so_states);
459 	}
460 	state->state = fmode;
461 }
462 
463 static struct nfs4_state *
464 __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
465 {
466 	struct nfs_inode *nfsi = NFS_I(inode);
467 	struct nfs4_state *state;
468 
469 	list_for_each_entry(state, &nfsi->open_states, inode_states) {
470 		if (state->owner != owner)
471 			continue;
472 		if (atomic_inc_not_zero(&state->count))
473 			return state;
474 	}
475 	return NULL;
476 }
477 
478 static void
479 nfs4_free_open_state(struct nfs4_state *state)
480 {
481 	kfree(state);
482 }
483 
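/*
 * Find or create the nfs4_state for this (inode, owner) pair.  The state is
 * allocated optimistically without locks held, then the inode's open_states
 * list is re-checked under owner->so_lock and inode->i_lock before the new
 * state is linked in; a losing racer frees its allocation.
 */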
484 struct nfs4_state *
485 nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
486 {
487 	struct nfs4_state *state, *new;
488 	struct nfs_inode *nfsi = NFS_I(inode);
489 
490 	spin_lock(&inode->i_lock);
491 	state = __nfs4_find_state_byowner(inode, owner);
492 	spin_unlock(&inode->i_lock);
493 	if (state)
494 		goto out;
495 	new = nfs4_alloc_open_state();
496 	spin_lock(&owner->so_lock);
497 	spin_lock(&inode->i_lock);
498 	state = __nfs4_find_state_byowner(inode, owner);
499 	if (state == NULL && new != NULL) {
500 		state = new;
501 		state->owner = owner;
502 		atomic_inc(&owner->so_count);
503 		list_add(&state->inode_states, &nfsi->open_states);
504 		state->inode = igrab(inode);
505 		spin_unlock(&inode->i_lock);
506 		/* Note: The reclaim code dictates that we add stateless
507 		 * and read-only stateids to the end of the list */
508 		list_add_tail(&state->open_states, &owner->so_states);
509 		spin_unlock(&owner->so_lock);
510 	} else {
511 		spin_unlock(&inode->i_lock);
512 		spin_unlock(&owner->so_lock);
513 		if (new)
514 			nfs4_free_open_state(new);
515 	}
516 out:
517 	return state;
518 }
519 
520 void nfs4_put_open_state(struct nfs4_state *state)
521 {
522 	struct inode *inode = state->inode;
523 	struct nfs4_state_owner *owner = state->owner;
524 
525 	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
526 		return;
527 	spin_lock(&inode->i_lock);
528 	list_del(&state->inode_states);
529 	list_del(&state->open_states);
530 	spin_unlock(&inode->i_lock);
531 	spin_unlock(&owner->so_lock);
532 	iput(inode);
533 	nfs4_free_open_state(state);
534 	nfs4_put_state_owner(owner);
535 }
536 
537 /*
538  * Close the current file.
539  */
540 static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
541 {
542 	struct nfs4_state_owner *owner = state->owner;
543 	int call_close = 0;
544 	fmode_t newstate;
545 
546 	atomic_inc(&owner->so_count);
547 	/* Protect against nfs4_find_state() */
548 	spin_lock(&owner->so_lock);
549 	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
550 		case FMODE_READ:
551 			state->n_rdonly--;
552 			break;
553 		case FMODE_WRITE:
554 			state->n_wronly--;
555 			break;
556 		case FMODE_READ|FMODE_WRITE:
557 			state->n_rdwr--;
558 	}
559 	newstate = FMODE_READ|FMODE_WRITE;
560 	if (state->n_rdwr == 0) {
561 		if (state->n_rdonly == 0) {
562 			newstate &= ~FMODE_READ;
563 			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
564 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
565 		}
566 		if (state->n_wronly == 0) {
567 			newstate &= ~FMODE_WRITE;
568 			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
569 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
570 		}
571 		if (newstate == 0)
572 			clear_bit(NFS_DELEGATED_STATE, &state->flags);
573 	}
574 	nfs4_state_set_mode_locked(state, newstate);
575 	spin_unlock(&owner->so_lock);
576 
577 	if (!call_close) {
578 		nfs4_put_open_state(state);
579 		nfs4_put_state_owner(owner);
580 	} else
581 		nfs4_do_close(path, state, wait);
582 }
583 
584 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
585 {
586 	__nfs4_close(path, state, fmode, 0);
587 }
588 
589 void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
590 {
591 	__nfs4_close(path, state, fmode, 1);
592 }
593 
594 /*
595  * Search the state->lock_states for an existing lock_owner
596  * that is compatible with current->files
597  */
598 static struct nfs4_lock_state *
599 __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
600 {
601 	struct nfs4_lock_state *pos;
602 	list_for_each_entry(pos, &state->lock_states, ls_locks) {
603 		if (pos->ls_owner != fl_owner)
604 			continue;
605 		atomic_inc(&pos->ls_count);
606 		return pos;
607 	}
608 	return NULL;
609 }
610 
611 /*
612  * Allocate a new lock_state for the given open state and lock owner,
613  * initializing its seqid counter and assigning it a unique lock-owner id
614  * from the client's cl_lockowner_id tree.
615  */
616 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
617 {
618 	struct nfs4_lock_state *lsp;
619 	struct nfs_client *clp = state->owner->so_client;
620 
621 	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
622 	if (lsp == NULL)
623 		return NULL;
624 	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
625 	spin_lock_init(&lsp->ls_sequence.lock);
626 	INIT_LIST_HEAD(&lsp->ls_sequence.list);
627 	lsp->ls_seqid.sequence = &lsp->ls_sequence;
628 	atomic_set(&lsp->ls_count, 1);
629 	lsp->ls_state = state;
630 	lsp->ls_owner = fl_owner;
631 	spin_lock(&clp->cl_lock);
632 	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
633 	spin_unlock(&clp->cl_lock);
634 	INIT_LIST_HEAD(&lsp->ls_locks);
635 	return lsp;
636 }
637 
638 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
639 {
640 	struct nfs_client *clp = lsp->ls_state->owner->so_client;
641 
642 	spin_lock(&clp->cl_lock);
643 	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
644 	spin_unlock(&clp->cl_lock);
645 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
646 	kfree(lsp);
647 }
648 
649 /*
650  * Return a compatible lock_state. If no initialized lock_state structure
651  * exists, return an uninitialized one.
652  *
653  */
654 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
655 {
656 	struct nfs4_lock_state *lsp, *new = NULL;
657 
658 	for(;;) {
659 		spin_lock(&state->state_lock);
660 		lsp = __nfs4_find_lock_state(state, owner);
661 		if (lsp != NULL)
662 			break;
663 		if (new != NULL) {
664 			list_add(&new->ls_locks, &state->lock_states);
665 			set_bit(LK_STATE_IN_USE, &state->flags);
666 			lsp = new;
667 			new = NULL;
668 			break;
669 		}
670 		spin_unlock(&state->state_lock);
671 		new = nfs4_alloc_lock_state(state, owner);
672 		if (new == NULL)
673 			return NULL;
674 	}
675 	spin_unlock(&state->state_lock);
676 	if (new != NULL)
677 		nfs4_free_lock_state(new);
678 	return lsp;
679 }
680 
681 /*
682  * Release reference to lock_state, and free it if we see that
683  * it is no longer in use
684  */
685 void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
686 {
687 	struct nfs4_state *state;
688 
689 	if (lsp == NULL)
690 		return;
691 	state = lsp->ls_state;
692 	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
693 		return;
694 	list_del(&lsp->ls_locks);
695 	if (list_empty(&state->lock_states))
696 		clear_bit(LK_STATE_IN_USE, &state->flags);
697 	spin_unlock(&state->state_lock);
698 	nfs4_free_lock_state(lsp);
699 }
700 
701 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
702 {
703 	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
704 
705 	dst->fl_u.nfs4_fl.owner = lsp;
706 	atomic_inc(&lsp->ls_count);
707 }
708 
709 static void nfs4_fl_release_lock(struct file_lock *fl)
710 {
711 	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
712 }
713 
714 static const struct file_lock_operations nfs4_fl_lock_ops = {
715 	.fl_copy_lock = nfs4_fl_copy_lock,
716 	.fl_release_private = nfs4_fl_release_lock,
717 };
718 
719 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
720 {
721 	struct nfs4_lock_state *lsp;
722 
723 	if (fl->fl_ops != NULL)
724 		return 0;
725 	lsp = nfs4_get_lock_state(state, fl->fl_owner);
726 	if (lsp == NULL)
727 		return -ENOMEM;
728 	fl->fl_u.nfs4_fl.owner = lsp;
729 	fl->fl_ops = &nfs4_fl_lock_ops;
730 	return 0;
731 }
732 
733 /*
734  * Byte-range lock aware utility to initialize the stateid of read/write
735  * requests.
736  */
737 void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
738 {
739 	struct nfs4_lock_state *lsp;
740 	int seq;
741 
742 	do {
743 		seq = read_seqbegin(&state->seqlock);
744 		memcpy(dst, &state->stateid, sizeof(*dst));
745 	} while (read_seqretry(&state->seqlock, seq));
746 	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
747 		return;
748 
749 	spin_lock(&state->state_lock);
750 	lsp = __nfs4_find_lock_state(state, fl_owner);
751 	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
752 		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
753 	spin_unlock(&state->state_lock);
754 	nfs4_put_lock_state(lsp);
755 }
756 
757 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
758 {
759 	struct nfs_seqid *new;
760 
761 	new = kmalloc(sizeof(*new), GFP_KERNEL);
762 	if (new != NULL) {
763 		new->sequence = counter;
764 		INIT_LIST_HEAD(&new->list);
765 	}
766 	return new;
767 }
768 
769 void nfs_release_seqid(struct nfs_seqid *seqid)
770 {
771 	if (!list_empty(&seqid->list)) {
772 		struct rpc_sequence *sequence = seqid->sequence->sequence;
773 
774 		spin_lock(&sequence->lock);
775 		list_del_init(&seqid->list);
776 		spin_unlock(&sequence->lock);
777 		rpc_wake_up(&sequence->wait);
778 	}
779 }
780 
781 void nfs_free_seqid(struct nfs_seqid *seqid)
782 {
783 	nfs_release_seqid(seqid);
784 	kfree(seqid);
785 }
786 
787 /*
788  * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
789  * failed with a seqid-incrementing error -
790  * see the comments for seqid_mutating_error() in nfs_fs.h
791  */
792 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
793 {
794 	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
795 	switch (status) {
796 		case 0:
797 			break;
798 		case -NFS4ERR_BAD_SEQID:
799 			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
800 				return;
801 			printk(KERN_WARNING "NFS: v4 server returned a bad"
802 					" sequence-id error on an"
803 					" unconfirmed sequence %p!\n",
804 					seqid->sequence);
805 		case -NFS4ERR_STALE_CLIENTID:
806 		case -NFS4ERR_STALE_STATEID:
807 		case -NFS4ERR_BAD_STATEID:
808 		case -NFS4ERR_BADXDR:
809 		case -NFS4ERR_RESOURCE:
810 		case -NFS4ERR_NOFILEHANDLE:
811 			/* Non-seqid mutating errors */
812 			return;
813 	}
814 	/*
815 	 * Note: no locking needed as we are guaranteed to be first
816 	 * on the sequence list
817 	 */
818 	seqid->sequence->counter++;
819 }
820 
821 void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
822 {
823 	struct nfs4_state_owner *sp = container_of(seqid->sequence,
824 					struct nfs4_state_owner, so_seqid);
825 	struct nfs_server *server = sp->so_server;
826 
827 	if (status == -NFS4ERR_BAD_SEQID)
828 		nfs4_drop_state_owner(sp);
829 	if (!nfs4_has_session(server->nfs_client))
830 		nfs_increment_seqid(status, seqid);
831 }
832 
833 /*
834  * Increment the seqid if the LOCK/LOCKU succeeded, or
835  * failed with a seqid-incrementing error -
836  * see the comments for seqid_mutating_error() in nfs_fs.h
837  */
838 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
839 {
840 	nfs_increment_seqid(status, seqid);
841 }
842 
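/*
 * Serialize seqid-mutating operations: queue this seqid on its owner's
 * sequence list and, unless it is already at the head of the queue, put the
 * RPC task to sleep and return -EAGAIN so the call is retried in order.
 */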
843 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
844 {
845 	struct rpc_sequence *sequence = seqid->sequence->sequence;
846 	int status = 0;
847 
848 	spin_lock(&sequence->lock);
849 	if (list_empty(&seqid->list))
850 		list_add_tail(&seqid->list, &sequence->list);
851 	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
852 		goto unlock;
853 	rpc_sleep_on(&sequence->wait, task, NULL);
854 	status = -EAGAIN;
855 unlock:
856 	spin_unlock(&sequence->lock);
857 	return status;
858 }
859 
860 static int nfs4_run_state_manager(void *);
861 
862 static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
863 {
864 	smp_mb__before_clear_bit();
865 	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
866 	smp_mb__after_clear_bit();
867 	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
868 	rpc_wake_up(&clp->cl_rpcwaitq);
869 }
870 
871 /*
872  * Schedule the nfs_client asynchronous state management routine
873  */
874 void nfs4_schedule_state_manager(struct nfs_client *clp)
875 {
876 	struct task_struct *task;
877 
878 	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
879 		return;
880 	__module_get(THIS_MODULE);
881 	atomic_inc(&clp->cl_count);
882 	task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
883 				rpc_peeraddr2str(clp->cl_rpcclient,
884 							RPC_DISPLAY_ADDR));
885 	if (!IS_ERR(task))
886 		return;
887 	nfs4_clear_state_manager_bit(clp);
888 	nfs_put_client(clp);
889 	module_put(THIS_MODULE);
890 }
891 
892 /*
893  * Schedule a state recovery attempt
894  */
895 void nfs4_schedule_state_recovery(struct nfs_client *clp)
896 {
897 	if (!clp)
898 		return;
899 	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
900 		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
901 	nfs4_schedule_state_manager(clp);
902 }
903 
904 int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
905 {
906 
907 	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
908 	/* Don't recover state that expired before the reboot */
909 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
910 		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
911 		return 0;
912 	}
913 	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
914 	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
915 	return 1;
916 }
917 
918 int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
919 {
920 	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
921 	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
922 	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
923 	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
924 	return 1;
925 }
926 
927 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
928 {
929 	struct inode *inode = state->inode;
930 	struct nfs_inode *nfsi = NFS_I(inode);
931 	struct file_lock *fl;
932 	int status = 0;
933 
934 	if (inode->i_flock == NULL)
935 		return 0;
936 
937 	/* Guard against delegation returns and new lock/unlock calls */
938 	down_write(&nfsi->rwsem);
939 	/* Protect inode->i_flock using the BKL */
940 	lock_kernel();
941 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
942 		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
943 			continue;
944 		if (nfs_file_open_context(fl->fl_file)->state != state)
945 			continue;
946 		unlock_kernel();
947 		status = ops->recover_lock(state, fl);
948 		switch (status) {
949 			case 0:
950 				break;
951 			case -ESTALE:
952 			case -NFS4ERR_ADMIN_REVOKED:
953 			case -NFS4ERR_STALE_STATEID:
954 			case -NFS4ERR_BAD_STATEID:
955 			case -NFS4ERR_EXPIRED:
956 			case -NFS4ERR_NO_GRACE:
957 			case -NFS4ERR_STALE_CLIENTID:
958 			case -NFS4ERR_BADSESSION:
959 			case -NFS4ERR_BADSLOT:
960 			case -NFS4ERR_BAD_HIGH_SLOT:
961 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
962 				goto out;
963 			default:
964 				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
965 						__func__, status);
966 			case -ENOMEM:
967 			case -NFS4ERR_DENIED:
968 			case -NFS4ERR_RECLAIM_BAD:
969 			case -NFS4ERR_RECLAIM_CONFLICT:
970 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
971 				status = 0;
972 		}
973 		lock_kernel();
974 	}
975 	unlock_kernel();
976 out:
977 	up_write(&nfsi->rwsem);
978 	return status;
979 }
980 
981 static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
982 {
983 	struct nfs4_state *state;
984 	struct nfs4_lock_state *lock;
985 	int status = 0;
986 
987 	/* Note: we rely on the sp->so_states list being ordered
988 	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
989 	 * states first.
990 	 * This is needed to ensure that the server won't give us any
991 	 * read delegations that we have to return if, say, we are
992 	 * recovering after a network partition or a reboot from a
993 	 * server that doesn't support a grace period.
994 	 */
995 restart:
996 	spin_lock(&sp->so_lock);
997 	list_for_each_entry(state, &sp->so_states, open_states) {
998 		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
999 			continue;
1000 		if (state->state == 0)
1001 			continue;
1002 		atomic_inc(&state->count);
1003 		spin_unlock(&sp->so_lock);
1004 		status = ops->recover_open(sp, state);
1005 		if (status >= 0) {
1006 			status = nfs4_reclaim_locks(state, ops);
1007 			if (status >= 0) {
1008 				list_for_each_entry(lock, &state->lock_states, ls_locks) {
1009 					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
1010 						printk(KERN_WARNING "%s: Lock reclaim failed!\n",
1011 							__func__);
1012 				}
1013 				nfs4_put_open_state(state);
1014 				goto restart;
1015 			}
1016 		}
1017 		switch (status) {
1018 			default:
1019 				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
1020 						__func__, status);
1021 			case -ENOENT:
1022 			case -ENOMEM:
1023 			case -ESTALE:
1024 				/*
1025 				 * Open state on this file cannot be recovered
1026 				 * All we can do is revert to using the zero stateid.
1027 				 */
1028 				memset(state->stateid.data, 0,
1029 					sizeof(state->stateid.data));
1030 				/* Mark the file as being 'closed' */
1031 				state->state = 0;
1032 				break;
1033 			case -NFS4ERR_ADMIN_REVOKED:
1034 			case -NFS4ERR_STALE_STATEID:
1035 			case -NFS4ERR_BAD_STATEID:
1036 			case -NFS4ERR_RECLAIM_BAD:
1037 			case -NFS4ERR_RECLAIM_CONFLICT:
1038 				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
1039 				break;
1040 			case -NFS4ERR_EXPIRED:
1041 			case -NFS4ERR_NO_GRACE:
1042 				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
1043 			case -NFS4ERR_STALE_CLIENTID:
1044 			case -NFS4ERR_BADSESSION:
1045 			case -NFS4ERR_BADSLOT:
1046 			case -NFS4ERR_BAD_HIGH_SLOT:
1047 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1048 				goto out_err;
1049 		}
1050 		nfs4_put_open_state(state);
1051 		goto restart;
1052 	}
1053 	spin_unlock(&sp->so_lock);
1054 	return 0;
1055 out_err:
1056 	nfs4_put_open_state(state);
1057 	return status;
1058 }
1059 
1060 static void nfs4_clear_open_state(struct nfs4_state *state)
1061 {
1062 	struct nfs4_lock_state *lock;
1063 
1064 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1065 	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1066 	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1067 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1068 	list_for_each_entry(lock, &state->lock_states, ls_locks) {
1069 		lock->ls_seqid.flags = 0;
1070 		lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
1071 	}
1072 }
1073 
1074 static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1075 {
1076 	struct nfs4_state_owner *sp;
1077 	struct rb_node *pos;
1078 	struct nfs4_state *state;
1079 
1080 	/* Reset all sequence ids to zero */
1081 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1082 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1083 		sp->so_seqid.flags = 0;
1084 		spin_lock(&sp->so_lock);
1085 		list_for_each_entry(state, &sp->so_states, open_states) {
1086 			if (mark_reclaim(clp, state))
1087 				nfs4_clear_open_state(state);
1088 		}
1089 		spin_unlock(&sp->so_lock);
1090 	}
1091 }
1092 
1093 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
1094 {
1095 	/* Mark all delegations for reclaim */
1096 	nfs_delegation_mark_reclaim(clp);
1097 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
1098 }
1099 
1100 static void nfs4_reclaim_complete(struct nfs_client *clp,
1101 				 const struct nfs4_state_recovery_ops *ops)
1102 {
1103 	/* Notify the server we're done reclaiming our state */
1104 	if (ops->reclaim_complete)
1105 		(void)ops->reclaim_complete(clp);
1106 }
1107 
1108 static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1109 {
1110 	struct nfs4_state_owner *sp;
1111 	struct rb_node *pos;
1112 	struct nfs4_state *state;
1113 
1114 	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1115 		return;
1116 
1117 	nfs4_reclaim_complete(clp,
1118 		nfs4_reboot_recovery_ops[clp->cl_minorversion]);
1119 
1120 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1121 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1122 		spin_lock(&sp->so_lock);
1123 		list_for_each_entry(state, &sp->so_states, open_states) {
1124 			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
1125 				continue;
1126 			nfs4_state_mark_reclaim_nograce(clp, state);
1127 		}
1128 		spin_unlock(&sp->so_lock);
1129 	}
1130 
1131 	nfs_delegation_reap_unclaimed(clp);
1132 }
1133 
1134 static void nfs_delegation_clear_all(struct nfs_client *clp)
1135 {
1136 	nfs_delegation_mark_reclaim(clp);
1137 	nfs_delegation_reap_unclaimed(clp);
1138 }
1139 
1140 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
1141 {
1142 	nfs_delegation_clear_all(clp);
1143 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
1144 }
1145 
1146 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1147 {
1148 	switch (error) {
1149 		case -NFS4ERR_CB_PATH_DOWN:
1150 			nfs_handle_cb_pathdown(clp);
1151 			return 0;
1152 		case -NFS4ERR_NO_GRACE:
1153 			nfs4_state_end_reclaim_reboot(clp);
1154 			return 0;
1155 		case -NFS4ERR_STALE_CLIENTID:
1156 		case -NFS4ERR_LEASE_MOVED:
1157 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1158 			nfs4_state_end_reclaim_reboot(clp);
1159 			nfs4_state_start_reclaim_reboot(clp);
1160 			break;
1161 		case -NFS4ERR_EXPIRED:
1162 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1163 			nfs4_state_start_reclaim_nograce(clp);
1164 			break;
1165 		case -NFS4ERR_BADSESSION:
1166 		case -NFS4ERR_BADSLOT:
1167 		case -NFS4ERR_BAD_HIGH_SLOT:
1168 		case -NFS4ERR_DEADSESSION:
1169 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1170 		case -NFS4ERR_SEQ_FALSE_RETRY:
1171 		case -NFS4ERR_SEQ_MISORDERED:
1172 			set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1173 			/* Zero session reset errors */
1174 			return 0;
1175 	}
1176 	return error;
1177 }
1178 
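/*
 * Walk the client's state owners and reclaim open and lock state for every
 * owner flagged with ops->owner_flag_bit.  The scan restarts from the top
 * after each owner because cl_lock is dropped while recovering.
 */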
1179 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
1180 {
1181 	struct rb_node *pos;
1182 	int status = 0;
1183 
1184 restart:
1185 	spin_lock(&clp->cl_lock);
1186 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1187 		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1188 		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
1189 			continue;
1190 		atomic_inc(&sp->so_count);
1191 		spin_unlock(&clp->cl_lock);
1192 		status = nfs4_reclaim_open_state(sp, ops);
1193 		if (status < 0) {
1194 			set_bit(ops->owner_flag_bit, &sp->so_flags);
1195 			nfs4_put_state_owner(sp);
1196 			return nfs4_recovery_handle_error(clp, status);
1197 		}
1198 		nfs4_put_state_owner(sp);
1199 		goto restart;
1200 	}
1201 	spin_unlock(&clp->cl_lock);
1202 	return status;
1203 }
1204 
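/*
 * Verify that the lease is still valid by issuing the minor-version
 * renew_lease operation, using the state-renewal credential if one exists
 * and falling back to the setclientid credential otherwise.
 */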
1205 static int nfs4_check_lease(struct nfs_client *clp)
1206 {
1207 	struct rpc_cred *cred;
1208 	struct nfs4_state_maintenance_ops *ops =
1209 		nfs4_state_renewal_ops[clp->cl_minorversion];
1210 	int status = -NFS4ERR_EXPIRED;
1211 
1212 	/* Is the client already known to have an expired lease? */
1213 	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1214 		return 0;
1215 	spin_lock(&clp->cl_lock);
1216 	cred = ops->get_state_renewal_cred_locked(clp);
1217 	spin_unlock(&clp->cl_lock);
1218 	if (cred == NULL) {
1219 		cred = nfs4_get_setclientid_cred(clp);
1220 		if (cred == NULL)
1221 			goto out;
1222 	}
1223 	status = ops->renew_lease(clp, cred);
1224 	put_rpccred(cred);
1225 out:
1226 	return nfs4_recovery_handle_error(clp, status);
1227 }
1228 
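/*
 * Re-establish the clientid via the minor-version establish_clid operation
 * (typically nfs4_init_clientid() or nfs41_init_clientid() above).  An
 * -EACCES with the machine credential means that credential is unusable,
 * so it is cleared and -EAGAIN tells the caller to retry with another one.
 */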
1229 static int nfs4_reclaim_lease(struct nfs_client *clp)
1230 {
1231 	struct rpc_cred *cred;
1232 	struct nfs4_state_recovery_ops *ops =
1233 		nfs4_reboot_recovery_ops[clp->cl_minorversion];
1234 	int status = -ENOENT;
1235 
1236 	cred = ops->get_clid_cred(clp);
1237 	if (cred != NULL) {
1238 		status = ops->establish_clid(clp, cred);
1239 		put_rpccred(cred);
1240 		/* Handle case where the user hasn't set up machine creds */
1241 		if (status == -EACCES && cred == clp->cl_machine_cred) {
1242 			nfs4_clear_machine_cred(clp);
1243 			status = -EAGAIN;
1244 		}
1245 		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
1246 			status = -EPROTONOSUPPORT;
1247 	}
1248 	return status;
1249 }
1250 
1251 #ifdef CONFIG_NFS_V4_1
1252 void nfs41_handle_recall_slot(struct nfs_client *clp)
1253 {
1254 	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1255 	nfs4_schedule_state_recovery(clp);
1256 }
1257 
1258 static void nfs4_reset_all_state(struct nfs_client *clp)
1259 {
1260 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1261 		clp->cl_boot_time = CURRENT_TIME;
1262 		nfs4_state_start_reclaim_nograce(clp);
1263 		nfs4_schedule_state_recovery(clp);
1264 	}
1265 }
1266 
1267 static void nfs41_handle_server_reboot(struct nfs_client *clp)
1268 {
1269 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1270 		nfs4_state_start_reclaim_reboot(clp);
1271 		nfs4_schedule_state_recovery(clp);
1272 	}
1273 }
1274 
1275 static void nfs41_handle_state_revoked(struct nfs_client *clp)
1276 {
1277 	/* Temporary */
1278 	nfs4_reset_all_state(clp);
1279 }
1280 
1281 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
1282 {
1283 	/* This will need to handle layouts too */
1284 	nfs_expire_all_delegations(clp);
1285 }
1286 
1287 static void nfs41_handle_cb_path_down(struct nfs_client *clp)
1288 {
1289 	nfs_expire_all_delegations(clp);
1290 	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
1291 		nfs4_schedule_state_recovery(clp);
1292 }
1293 
1294 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
1295 {
1296 	if (!flags)
1297 		return;
1298 	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
1299 		nfs41_handle_server_reboot(clp);
1300 	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
1301 			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
1302 			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
1303 			    SEQ4_STATUS_LEASE_MOVED))
1304 		nfs41_handle_state_revoked(clp);
1305 	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
1306 		nfs41_handle_recallable_state_revoked(clp);
1307 	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
1308 			    SEQ4_STATUS_BACKCHANNEL_FAULT |
1309 			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
1310 		nfs41_handle_cb_path_down(clp);
1311 }
1312 
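/*
 * Reset an NFSv4.1 session: drain outstanding requests, destroy the old
 * session, then create a fresh one.  If the lease has not also expired,
 * lease renewal is restarted immediately.
 */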
1313 static int nfs4_reset_session(struct nfs_client *clp)
1314 {
1315 	int status;
1316 
1317 	nfs4_begin_drain_session(clp);
1318 	status = nfs4_proc_destroy_session(clp->cl_session);
1319 	if (status && status != -NFS4ERR_BADSESSION &&
1320 	    status != -NFS4ERR_DEADSESSION) {
1321 		status = nfs4_recovery_handle_error(clp, status);
1322 		goto out;
1323 	}
1324 
1325 	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
1326 	status = nfs4_proc_create_session(clp);
1327 	if (status) {
1328 		status = nfs4_recovery_handle_error(clp, status);
1329 		goto out;
1330 	}
1331 	/* create_session negotiated new slot table */
1332 	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1333 
1334 	 /* Let the state manager reestablish state */
1335 	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1336 		nfs41_setup_state_renewal(clp);
1337 out:
1338 	return status;
1339 }
1340 
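/*
 * Recall session slots: after draining the session, resize the forechannel
 * slot table to the server-requested target_max_slots, carrying over each
 * remaining slot's sequence number.
 */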
1341 static int nfs4_recall_slot(struct nfs_client *clp)
1342 {
1343 	struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
1344 	struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
1345 	struct nfs4_slot *new, *old;
1346 	int i;
1347 
1348 	nfs4_begin_drain_session(clp);
1349 	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
1350 		      GFP_KERNEL);
1351 	if (!new)
1352 		return -ENOMEM;
1353 
1354 	spin_lock(&fc_tbl->slot_tbl_lock);
1355 	for (i = 0; i < fc_tbl->target_max_slots; i++)
1356 		new[i].seq_nr = fc_tbl->slots[i].seq_nr;
1357 	old = fc_tbl->slots;
1358 	fc_tbl->slots = new;
1359 	fc_tbl->max_slots = fc_tbl->target_max_slots;
1360 	fc_tbl->target_max_slots = 0;
1361 	fc_attrs->max_reqs = fc_tbl->max_slots;
1362 	spin_unlock(&fc_tbl->slot_tbl_lock);
1363 
1364 	kfree(old);
1365 	nfs4_end_drain_session(clp);
1366 	return 0;
1367 }
1368 
1369 #else /* CONFIG_NFS_V4_1 */
1370 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
1371 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
1372 static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
1373 #endif /* CONFIG_NFS_V4_1 */
1374 
1375 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
1376  * on EXCHANGE_ID for v4.1
1377  */
1378 static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
1379 {
1380 	if (nfs4_has_session(clp)) {
1381 		switch (status) {
1382 		case -NFS4ERR_DELAY:
1383 		case -NFS4ERR_CLID_INUSE:
1384 		case -EAGAIN:
1385 		case -EKEYEXPIRED:
1386 			break;
1387 
1388 		case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1389 					 * in nfs4_exchange_id */
1390 		default:
1391 			return;
1392 		}
1393 	}
1394 	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1395 }
1396 
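/*
 * The state manager main loop.  With NFS4CLNT_MANAGER_RUNNING held it
 * handles, in order: clientid/lease re-establishment, lease checks, session
 * reset, reboot (grace period) recovery, no-grace recovery, delegation
 * returns and slot recalls, looping until no more work bits are set in
 * cl_state.
 */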
1397 static void nfs4_state_manager(struct nfs_client *clp)
1398 {
1399 	int status = 0;
1400 
1401 	/* Ensure exclusive access to NFSv4 state */
1402 	for(;;) {
1403 		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
1404 			/* We're going to have to re-establish a clientid */
1405 			status = nfs4_reclaim_lease(clp);
1406 			if (status) {
1407 				nfs4_set_lease_expired(clp, status);
1408 				if (test_bit(NFS4CLNT_LEASE_EXPIRED,
1409 							&clp->cl_state))
1410 					continue;
1411 				if (clp->cl_cons_state ==
1412 							NFS_CS_SESSION_INITING)
1413 					nfs_mark_client_ready(clp, status);
1414 				goto out_error;
1415 			}
1416 			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1417 			set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1418 		}
1419 
1420 		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
1421 			status = nfs4_check_lease(clp);
1422 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1423 				continue;
1424 			if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
1425 				goto out_error;
1426 		}
1427 
1428 		/* Initialize or reset the session */
1429 		if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
1430 		   && nfs4_has_session(clp)) {
1431 			status = nfs4_reset_session(clp);
1432 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1433 				continue;
1434 			if (status < 0)
1435 				goto out_error;
1436 		}
1437 
1438 		/* First recover reboot state... */
1439 		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
1440 			status = nfs4_do_reclaim(clp,
1441 				nfs4_reboot_recovery_ops[clp->cl_minorversion]);
1442 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
1443 			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
1444 				continue;
1445 			nfs4_state_end_reclaim_reboot(clp);
1446 			if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
1447 				continue;
1448 			if (status < 0)
1449 				goto out_error;
1450 		}
1451 
1452 		/* Now recover expired state... */
1453 		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
1454 			status = nfs4_do_reclaim(clp,
1455 				nfs4_nograce_recovery_ops[clp->cl_minorversion]);
1456 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
1457 			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
1458 			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1459 				continue;
1460 			if (status < 0)
1461 				goto out_error;
1462 		}
1463 
1464 		nfs4_end_drain_session(clp);
1465 		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
1466 			nfs_client_return_marked_delegations(clp);
1467 			continue;
1468 		}
1469 		/* Recall session slots */
1470 		if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
1471 		   && nfs4_has_session(clp)) {
1472 			status = nfs4_recall_slot(clp);
1473 			if (status < 0)
1474 				goto out_error;
1475 			continue;
1476 		}
1477 
1478 
1479 		nfs4_clear_state_manager_bit(clp);
1480 		/* Did we race with an attempt to give us more work? */
1481 		if (clp->cl_state == 0)
1482 			break;
1483 		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1484 			break;
1485 	}
1486 	return;
1487 out_error:
1488 	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
1489 			" with error %d\n", clp->cl_hostname, -status);
1490 	nfs4_end_drain_session(clp);
1491 	nfs4_clear_state_manager_bit(clp);
1492 }
1493 
1494 static int nfs4_run_state_manager(void *ptr)
1495 {
1496 	struct nfs_client *clp = ptr;
1497 
1498 	allow_signal(SIGKILL);
1499 	nfs4_state_manager(clp);
1500 	nfs_put_client(clp);
1501 	module_put_and_exit(0);
1502 	return 0;
1503 }
1504 
1505 /*
1506  * Local variables:
1507  *  c-basic-offset: 8
1508  * End:
1509  */
1510