xref: /openbmc/linux/fs/nfs/nfs4state.c (revision f42b3800)
1 /*
2  *  fs/nfs/nfs4state.c
3  *
4  *  Client-side XDR for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  1. Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *  2. Redistributions in binary form must reproduce the above copyright
18  *     notice, this list of conditions and the following disclaimer in the
19  *     documentation and/or other materials provided with the distribution.
20  *  3. Neither the name of the University nor the names of its
21  *     contributors may be used to endorse or promote products derived
22  *     from this software without specific prior written permission.
23  *
24  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Implementation of the NFSv4 state model.  For the time being,
37  * this is minimal, but will be made much more complex in a
38  * subsequent patch.
39  */
40 
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/smp_lock.h>
44 #include <linux/nfs_fs.h>
45 #include <linux/nfs_idmap.h>
46 #include <linux/kthread.h>
47 #include <linux/module.h>
48 #include <linux/random.h>
49 #include <linux/workqueue.h>
50 #include <linux/bitops.h>
51 
52 #include "nfs4_fs.h"
53 #include "callback.h"
54 #include "delegation.h"
55 #include "internal.h"
56 
57 #define OPENOWNER_POOL_SIZE	8
58 
59 const nfs4_stateid zero_stateid;
60 
61 static LIST_HEAD(nfs4_clientid_list);
62 
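/*
 * Establish the clientid with the server (SETCLIENTID followed by
 * SETCLIENTID_CONFIRM) using the given credential and, on success,
 * schedule renewal of the new lease.
 */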
63 static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
64 {
65 	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
66 			nfs_callback_tcpport, cred);
67 	if (status == 0)
68 		status = nfs4_proc_setclientid_confirm(clp, cred);
69 	if (status == 0)
70 		nfs4_schedule_state_renewal(clp);
71 	return status;
72 }
73 
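/*
 * Return a referenced credential suitable for lease renewal: the cred of
 * the first state owner that still has open state, or NULL if none exists.
 */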
74 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
75 {
76 	struct nfs4_state_owner *sp;
77 	struct rb_node *pos;
78 	struct rpc_cred *cred = NULL;
79 
80 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
81 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
82 		if (list_empty(&sp->so_states))
83 			continue;
84 		cred = get_rpccred(sp->so_cred);
85 		break;
86 	}
87 	return cred;
88 }
89 
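/*
 * Return a referenced credential for use with SETCLIENTID: the cred of the
 * first entry in the cl_state_owners tree, or NULL if the tree is empty.
 */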
90 static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
91 {
92 	struct nfs4_state_owner *sp;
93 	struct rb_node *pos;
94 
95 	pos = rb_first(&clp->cl_state_owners);
96 	if (pos != NULL) {
97 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
98 		return get_rpccred(sp->so_cred);
99 	}
100 	return NULL;
101 }
102 
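/*
 * Allocate a unique 64-bit id that is at least 'minval' and fits in
 * 'maxbits' bits, and insert it into the rb-tree at 'root'.  A random
 * starting value is picked; on collision the tree is walked until a free
 * value is found.
 */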
103 static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
104 		__u64 minval, int maxbits)
105 {
106 	struct rb_node **p, *parent;
107 	struct nfs_unique_id *pos;
108 	__u64 mask = ~0ULL;
109 
110 	if (maxbits < 64)
111 		mask = (1ULL << maxbits) - 1ULL;
112 
113 	/* Ensure distribution is more or less flat */
114 	get_random_bytes(&new->id, sizeof(new->id));
115 	new->id &= mask;
116 	if (new->id < minval)
117 		new->id += minval;
118 retry:
119 	p = &root->rb_node;
120 	parent = NULL;
121 
122 	while (*p != NULL) {
123 		parent = *p;
124 		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
125 
126 		if (new->id < pos->id)
127 			p = &(*p)->rb_left;
128 		else if (new->id > pos->id)
129 			p = &(*p)->rb_right;
130 		else
131 			goto id_exists;
132 	}
133 	rb_link_node(&new->rb_node, parent, p);
134 	rb_insert_color(&new->rb_node, root);
135 	return;
136 id_exists:
137 	for (;;) {
138 		new->id++;
139 		if (new->id < minval || (new->id & mask) != new->id) {
140 			new->id = minval;
141 			break;
142 		}
143 		parent = rb_next(parent);
144 		if (parent == NULL)
145 			break;
146 		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
147 		if (new->id < pos->id)
148 			break;
149 	}
150 	goto retry;
151 }
152 
153 static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
154 {
155 	rb_erase(&id->rb_node, root);
156 }
157 
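/*
 * Look up the state owner matching (server, cred) in the client's
 * cl_state_owners tree, taking a reference to it.  Returns NULL if no
 * matching owner exists.  Called with clp->cl_lock held.
 */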
158 static struct nfs4_state_owner *
159 nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
160 {
161 	struct nfs_client *clp = server->nfs_client;
162 	struct rb_node **p = &clp->cl_state_owners.rb_node,
163 		       *parent = NULL;
164 	struct nfs4_state_owner *sp, *res = NULL;
165 
166 	while (*p != NULL) {
167 		parent = *p;
168 		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
169 
170 		if (server < sp->so_server) {
171 			p = &parent->rb_left;
172 			continue;
173 		}
174 		if (server > sp->so_server) {
175 			p = &parent->rb_right;
176 			continue;
177 		}
178 		if (cred < sp->so_cred)
179 			p = &parent->rb_left;
180 		else if (cred > sp->so_cred)
181 			p = &parent->rb_right;
182 		else {
183 			atomic_inc(&sp->so_count);
184 			res = sp;
185 			break;
186 		}
187 	}
188 	return res;
189 }
190 
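/*
 * Insert a new state owner into the cl_state_owners tree, allocating a
 * unique open owner id for it.  If an owner already exists for the same
 * (server, cred), a reference to the existing owner is returned instead.
 * Called with clp->cl_lock held.
 */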
191 static struct nfs4_state_owner *
192 nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
193 {
194 	struct rb_node **p = &clp->cl_state_owners.rb_node,
195 		       *parent = NULL;
196 	struct nfs4_state_owner *sp;
197 
198 	while (*p != NULL) {
199 		parent = *p;
200 		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
201 
202 		if (new->so_server < sp->so_server) {
203 			p = &parent->rb_left;
204 			continue;
205 		}
206 		if (new->so_server > sp->so_server) {
207 			p = &parent->rb_right;
208 			continue;
209 		}
210 		if (new->so_cred < sp->so_cred)
211 			p = &parent->rb_left;
212 		else if (new->so_cred > sp->so_cred)
213 			p = &parent->rb_right;
214 		else {
215 			atomic_inc(&sp->so_count);
216 			return sp;
217 		}
218 	}
219 	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
220 	rb_link_node(&new->so_client_node, parent, p);
221 	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
222 	return new;
223 }
224 
225 static void
226 nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
227 {
228 	if (!RB_EMPTY_NODE(&sp->so_client_node))
229 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
230 	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
231 }
232 
233 /*
234  * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
235  * create a new state_owner.
236  *
237  */
238 static struct nfs4_state_owner *
239 nfs4_alloc_state_owner(void)
240 {
241 	struct nfs4_state_owner *sp;
242 
243 	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
244 	if (!sp)
245 		return NULL;
246 	spin_lock_init(&sp->so_lock);
247 	INIT_LIST_HEAD(&sp->so_states);
248 	INIT_LIST_HEAD(&sp->so_delegations);
249 	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
250 	sp->so_seqid.sequence = &sp->so_sequence;
251 	spin_lock_init(&sp->so_sequence.lock);
252 	INIT_LIST_HEAD(&sp->so_sequence.list);
253 	atomic_set(&sp->so_count, 1);
254 	return sp;
255 }
256 
257 void
258 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
259 {
260 	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
261 		struct nfs_client *clp = sp->so_client;
262 
263 		spin_lock(&clp->cl_lock);
264 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
265 		RB_CLEAR_NODE(&sp->so_client_node);
266 		spin_unlock(&clp->cl_lock);
267 	}
268 }
269 
270 /*
271  * Note: must be called with clp->cl_sem held in order to prevent races
272  *       with reboot recovery!
273  */
274 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
275 {
276 	struct nfs_client *clp = server->nfs_client;
277 	struct nfs4_state_owner *sp, *new;
278 
279 	spin_lock(&clp->cl_lock);
280 	sp = nfs4_find_state_owner(server, cred);
281 	spin_unlock(&clp->cl_lock);
282 	if (sp != NULL)
283 		return sp;
284 	new = nfs4_alloc_state_owner();
285 	if (new == NULL)
286 		return NULL;
287 	new->so_client = clp;
288 	new->so_server = server;
289 	new->so_cred = cred;
290 	spin_lock(&clp->cl_lock);
291 	sp = nfs4_insert_state_owner(clp, new);
292 	spin_unlock(&clp->cl_lock);
293 	if (sp == new)
294 		get_rpccred(cred);
295 	else
296 		kfree(new);
297 	return sp;
298 }
299 
300 /*
301  * Must be called with clp->cl_sem held in order to avoid races
302  * with state recovery...
303  */
304 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
305 {
306 	struct nfs_client *clp = sp->so_client;
307 	struct rpc_cred *cred = sp->so_cred;
308 
309 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
310 		return;
311 	nfs4_remove_state_owner(clp, sp);
312 	spin_unlock(&clp->cl_lock);
313 	put_rpccred(cred);
314 	kfree(sp);
315 }
316 
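/*
 * Allocate and initialize an nfs4_state, returning it with a single
 * reference held.  Returns NULL on allocation failure.
 */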
317 static struct nfs4_state *
318 nfs4_alloc_open_state(void)
319 {
320 	struct nfs4_state *state;
321 
322 	state = kzalloc(sizeof(*state), GFP_KERNEL);
323 	if (!state)
324 		return NULL;
325 	atomic_set(&state->count, 1);
326 	INIT_LIST_HEAD(&state->lock_states);
327 	spin_lock_init(&state->state_lock);
328 	seqlock_init(&state->seqlock);
329 	return state;
330 }
331 
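/*
 * Update the open mode of 'state', moving it within the owner's so_states
 * list so that writable states stay at the head; the reclaim code depends
 * on recovering those first.  The owner's so_lock must be held.
 */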
332 void
333 nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
334 {
335 	if (state->state == mode)
336 		return;
337 	/* NB! List reordering - see the reclaim code for why.  */
338 	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
339 		if (mode & FMODE_WRITE)
340 			list_move(&state->open_states, &state->owner->so_states);
341 		else
342 			list_move_tail(&state->open_states, &state->owner->so_states);
343 	}
344 	state->state = mode;
345 }
346 
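/*
 * Find an open state on this inode that belongs to 'owner' and take a
 * reference to it.  Called with inode->i_lock held.
 */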
347 static struct nfs4_state *
348 __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
349 {
350 	struct nfs_inode *nfsi = NFS_I(inode);
351 	struct nfs4_state *state;
352 
353 	list_for_each_entry(state, &nfsi->open_states, inode_states) {
354 		if (state->owner != owner)
355 			continue;
356 		if (atomic_inc_not_zero(&state->count))
357 			return state;
358 	}
359 	return NULL;
360 }
361 
362 static void
363 nfs4_free_open_state(struct nfs4_state *state)
364 {
365 	kfree(state);
366 }
367 
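/*
 * Find the open state for (inode, owner), or create and insert a new one
 * if none exists yet.  Returns NULL if allocation fails.
 */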
368 struct nfs4_state *
369 nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
370 {
371 	struct nfs4_state *state, *new;
372 	struct nfs_inode *nfsi = NFS_I(inode);
373 
374 	spin_lock(&inode->i_lock);
375 	state = __nfs4_find_state_byowner(inode, owner);
376 	spin_unlock(&inode->i_lock);
377 	if (state)
378 		goto out;
379 	new = nfs4_alloc_open_state();
380 	spin_lock(&owner->so_lock);
381 	spin_lock(&inode->i_lock);
382 	state = __nfs4_find_state_byowner(inode, owner);
383 	if (state == NULL && new != NULL) {
384 		state = new;
385 		state->owner = owner;
386 		atomic_inc(&owner->so_count);
387 		list_add(&state->inode_states, &nfsi->open_states);
388 		state->inode = igrab(inode);
389 		spin_unlock(&inode->i_lock);
390 		/* Note: The reclaim code dictates that we add stateless
391 		 * and read-only stateids to the end of the list */
392 		list_add_tail(&state->open_states, &owner->so_states);
393 		spin_unlock(&owner->so_lock);
394 	} else {
395 		spin_unlock(&inode->i_lock);
396 		spin_unlock(&owner->so_lock);
397 		if (new)
398 			nfs4_free_open_state(new);
399 	}
400 out:
401 	return state;
402 }
403 
404 /*
405  * Beware! Caller must be holding exactly one
406  * reference to clp->cl_sem!
407  */
408 void nfs4_put_open_state(struct nfs4_state *state)
409 {
410 	struct inode *inode = state->inode;
411 	struct nfs4_state_owner *owner = state->owner;
412 
413 	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
414 		return;
415 	spin_lock(&inode->i_lock);
416 	list_del(&state->inode_states);
417 	list_del(&state->open_states);
418 	spin_unlock(&inode->i_lock);
419 	spin_unlock(&owner->so_lock);
420 	iput(inode);
421 	nfs4_free_open_state(state);
422 	nfs4_put_state_owner(owner);
423 }
424 
425 /*
426  * Close the current file.
427  */
428 static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait)
429 {
430 	struct nfs4_state_owner *owner = state->owner;
431 	int call_close = 0;
432 	int newstate;
433 
434 	atomic_inc(&owner->so_count);
435 	/* Protect against nfs4_find_state() */
436 	spin_lock(&owner->so_lock);
437 	switch (mode & (FMODE_READ | FMODE_WRITE)) {
438 		case FMODE_READ:
439 			state->n_rdonly--;
440 			break;
441 		case FMODE_WRITE:
442 			state->n_wronly--;
443 			break;
444 		case FMODE_READ|FMODE_WRITE:
445 			state->n_rdwr--;
446 	}
447 	newstate = FMODE_READ|FMODE_WRITE;
448 	if (state->n_rdwr == 0) {
449 		if (state->n_rdonly == 0) {
450 			newstate &= ~FMODE_READ;
451 			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
452 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
453 		}
454 		if (state->n_wronly == 0) {
455 			newstate &= ~FMODE_WRITE;
456 			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
457 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
458 		}
459 		if (newstate == 0)
460 			clear_bit(NFS_DELEGATED_STATE, &state->flags);
461 	}
462 	nfs4_state_set_mode_locked(state, newstate);
463 	spin_unlock(&owner->so_lock);
464 
465 	if (!call_close) {
466 		nfs4_put_open_state(state);
467 		nfs4_put_state_owner(owner);
468 	} else
469 		nfs4_do_close(path, state, wait);
470 }
471 
472 void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
473 {
474 	__nfs4_close(path, state, mode, 0);
475 }
476 
477 void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode)
478 {
479 	__nfs4_close(path, state, mode, 1);
480 }
481 
482 /*
483  * Search the state->lock_states for an existing lock_owner
484  * that is compatible with current->files
485  */
486 static struct nfs4_lock_state *
487 __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
488 {
489 	struct nfs4_lock_state *pos;
490 	list_for_each_entry(pos, &state->lock_states, ls_locks) {
491 		if (pos->ls_owner != fl_owner)
492 			continue;
493 		atomic_inc(&pos->ls_count);
494 		return pos;
495 	}
496 	return NULL;
497 }
498 
499 /*
500  * Allocate and initialize a new lock_state structure for the given
501  * lock owner, assigning it a unique lock owner id.  Returns NULL on
502  * allocation failure.
503  */
504 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
505 {
506 	struct nfs4_lock_state *lsp;
507 	struct nfs_client *clp = state->owner->so_client;
508 
509 	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
510 	if (lsp == NULL)
511 		return NULL;
512 	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
513 	spin_lock_init(&lsp->ls_sequence.lock);
514 	INIT_LIST_HEAD(&lsp->ls_sequence.list);
515 	lsp->ls_seqid.sequence = &lsp->ls_sequence;
516 	atomic_set(&lsp->ls_count, 1);
517 	lsp->ls_owner = fl_owner;
518 	spin_lock(&clp->cl_lock);
519 	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
520 	spin_unlock(&clp->cl_lock);
521 	INIT_LIST_HEAD(&lsp->ls_locks);
522 	return lsp;
523 }
524 
525 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
526 {
527 	struct nfs_client *clp = lsp->ls_state->owner->so_client;
528 
529 	spin_lock(&clp->cl_lock);
530 	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
531 	spin_unlock(&clp->cl_lock);
532 	kfree(lsp);
533 }
534 
535 /*
536  * Return a compatible lock_state. If no initialized lock_state structure
537  * exists, return an uninitialized one.
538  *
539  * The caller must be holding clp->cl_sem
540  */
541 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
542 {
543 	struct nfs4_lock_state *lsp, *new = NULL;
544 
545 	for (;;) {
546 		spin_lock(&state->state_lock);
547 		lsp = __nfs4_find_lock_state(state, owner);
548 		if (lsp != NULL)
549 			break;
550 		if (new != NULL) {
551 			new->ls_state = state;
552 			list_add(&new->ls_locks, &state->lock_states);
553 			set_bit(LK_STATE_IN_USE, &state->flags);
554 			lsp = new;
555 			new = NULL;
556 			break;
557 		}
558 		spin_unlock(&state->state_lock);
559 		new = nfs4_alloc_lock_state(state, owner);
560 		if (new == NULL)
561 			return NULL;
562 	}
563 	spin_unlock(&state->state_lock);
564 	if (new != NULL)
565 		nfs4_free_lock_state(new);
566 	return lsp;
567 }
568 
569 /*
570  * Release reference to lock_state, and free it if we see that
571  * it is no longer in use
572  */
573 void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
574 {
575 	struct nfs4_state *state;
576 
577 	if (lsp == NULL)
578 		return;
579 	state = lsp->ls_state;
580 	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
581 		return;
582 	list_del(&lsp->ls_locks);
583 	if (list_empty(&state->lock_states))
584 		clear_bit(LK_STATE_IN_USE, &state->flags);
585 	spin_unlock(&state->state_lock);
586 	nfs4_free_lock_state(lsp);
587 }
588 
589 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
590 {
591 	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
592 
593 	dst->fl_u.nfs4_fl.owner = lsp;
594 	atomic_inc(&lsp->ls_count);
595 }
596 
597 static void nfs4_fl_release_lock(struct file_lock *fl)
598 {
599 	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
600 }
601 
602 static struct file_lock_operations nfs4_fl_lock_ops = {
603 	.fl_copy_lock = nfs4_fl_copy_lock,
604 	.fl_release_private = nfs4_fl_release_lock,
605 };
606 
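/*
 * Attach an nfs4_lock_state to the file_lock and install the lock
 * operations that keep the reference across lock copies and releases.
 * Returns -ENOMEM if no lock state could be allocated; does nothing if
 * lock operations are already attached.
 */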
607 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
608 {
609 	struct nfs4_lock_state *lsp;
610 
611 	if (fl->fl_ops != NULL)
612 		return 0;
613 	lsp = nfs4_get_lock_state(state, fl->fl_owner);
614 	if (lsp == NULL)
615 		return -ENOMEM;
616 	fl->fl_u.nfs4_fl.owner = lsp;
617 	fl->fl_ops = &nfs4_fl_lock_ops;
618 	return 0;
619 }
620 
621 /*
622  * Byte-range lock aware utility to initialize the stateid of read/write
623  * requests.
624  */
625 void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
626 {
627 	struct nfs4_lock_state *lsp;
628 	int seq;
629 
630 	do {
631 		seq = read_seqbegin(&state->seqlock);
632 		memcpy(dst, &state->stateid, sizeof(*dst));
633 	} while (read_seqretry(&state->seqlock, seq));
634 	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
635 		return;
636 
637 	spin_lock(&state->state_lock);
638 	lsp = __nfs4_find_lock_state(state, fl_owner);
639 	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
640 		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
641 	spin_unlock(&state->state_lock);
642 	nfs4_put_lock_state(lsp);
643 }
644 
645 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
646 {
647 	struct nfs_seqid *new;
648 
649 	new = kmalloc(sizeof(*new), GFP_KERNEL);
650 	if (new != NULL) {
651 		new->sequence = counter;
652 		INIT_LIST_HEAD(&new->list);
653 	}
654 	return new;
655 }
656 
657 void nfs_free_seqid(struct nfs_seqid *seqid)
658 {
659 	if (!list_empty(&seqid->list)) {
660 		struct rpc_sequence *sequence = seqid->sequence->sequence;
661 
662 		spin_lock(&sequence->lock);
663 		list_del(&seqid->list);
664 		spin_unlock(&sequence->lock);
665 		rpc_wake_up(&sequence->wait);
666 	}
667 	kfree(seqid);
668 }
669 
670 /*
671  * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
672  * failed with a seqid-incrementing error -
673  * see the comments at nfs_fs.h:seqid_mutating_error()
674  */
675 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
676 {
677 	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
678 	switch (status) {
679 		case 0:
680 			break;
681 		case -NFS4ERR_BAD_SEQID:
682 			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
683 				return;
684 			printk(KERN_WARNING "NFS: v4 server returned a bad"
685 					" sequence-id error on an"
686 					" unconfirmed sequence %p!\n",
687 					seqid->sequence);
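			/* Fall through to the non-seqid-mutating cases below */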
688 		case -NFS4ERR_STALE_CLIENTID:
689 		case -NFS4ERR_STALE_STATEID:
690 		case -NFS4ERR_BAD_STATEID:
691 		case -NFS4ERR_BADXDR:
692 		case -NFS4ERR_RESOURCE:
693 		case -NFS4ERR_NOFILEHANDLE:
694 			/* Non-seqid mutating errors */
695 			return;
696 	}
697 	/*
698 	 * Note: no locking needed as we are guaranteed to be first
699 	 * on the sequence list
700 	 */
701 	seqid->sequence->counter++;
702 }
703 
704 void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
705 {
706 	if (status == -NFS4ERR_BAD_SEQID) {
707 		struct nfs4_state_owner *sp = container_of(seqid->sequence,
708 				struct nfs4_state_owner, so_seqid);
709 		nfs4_drop_state_owner(sp);
710 	}
711 	nfs_increment_seqid(status, seqid);
712 }
713 
714 /*
715  * Increment the seqid if the LOCK/LOCKU succeeded, or
716  * failed with a seqid-incrementing error -
717  * see the comments at nfs_fs.h:seqid_mutating_error()
718  */
719 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
720 {
721 	nfs_increment_seqid(status, seqid);
722 }
723 
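/*
 * Serialize RPC calls that share a seqid counter: queue 'seqid' on the
 * sequence list and, unless it is at the head of the queue, put the task
 * to sleep and return -EAGAIN so that the call is retried later.
 */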
724 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
725 {
726 	struct rpc_sequence *sequence = seqid->sequence->sequence;
727 	int status = 0;
728 
729 	spin_lock(&sequence->lock);
730 	if (list_empty(&seqid->list))
731 		list_add_tail(&seqid->list, &sequence->list);
732 	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
733 		goto unlock;
734 	rpc_sleep_on(&sequence->wait, task, NULL, NULL);
735 	status = -EAGAIN;
736 unlock:
737 	spin_unlock(&sequence->lock);
738 	return status;
739 }
740 
741 static int reclaimer(void *);
742 
743 static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
744 {
745 	smp_mb__before_clear_bit();
746 	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
747 	smp_mb__after_clear_bit();
748 	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
749 	rpc_wake_up(&clp->cl_rpcwaitq);
750 }
751 
752 /*
753  * State recovery routine
754  */
755 static void nfs4_recover_state(struct nfs_client *clp)
756 {
757 	struct task_struct *task;
758 
759 	__module_get(THIS_MODULE);
760 	atomic_inc(&clp->cl_count);
761 	task = kthread_run(reclaimer, clp, "%s-reclaim",
762 				rpc_peeraddr2str(clp->cl_rpcclient,
763 							RPC_DISPLAY_ADDR));
764 	if (!IS_ERR(task))
765 		return;
766 	nfs4_clear_recover_bit(clp);
767 	nfs_put_client(clp);
768 	module_put(THIS_MODULE);
769 }
770 
771 /*
772  * Schedule a state recovery attempt
773  */
774 void nfs4_schedule_state_recovery(struct nfs_client *clp)
775 {
776 	if (!clp)
777 		return;
778 	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
779 		nfs4_recover_state(clp);
780 }
781 
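/*
 * Ask the server to re-establish every POSIX and flock lock held on this
 * open state, using the supplied recovery operations.
 */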
782 static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
783 {
784 	struct inode *inode = state->inode;
785 	struct file_lock *fl;
786 	int status = 0;
787 
788 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
789 		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
790 			continue;
791 		if (nfs_file_open_context(fl->fl_file)->state != state)
792 			continue;
793 		status = ops->recover_lock(state, fl);
794 		if (status >= 0)
795 			continue;
796 		switch (status) {
797 			default:
798 				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
799 						__FUNCTION__, status);
800 			case -NFS4ERR_EXPIRED:
801 			case -NFS4ERR_NO_GRACE:
802 			case -NFS4ERR_RECLAIM_BAD:
803 			case -NFS4ERR_RECLAIM_CONFLICT:
804 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
805 				break;
806 			case -NFS4ERR_STALE_CLIENTID:
807 				goto out_err;
808 		}
809 	}
810 	return 0;
811 out_err:
812 	return status;
813 }
814 
815 static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
816 {
817 	struct nfs4_state *state;
818 	struct nfs4_lock_state *lock;
819 	int status = 0;
820 
821 	/* Note: we rely on the sp->so_states list being ordered
822 	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
823 	 * states first.
824 	 * This is needed to ensure that the server won't give us any
825 	 * read delegations that we have to return if, say, we are
826 	 * recovering after a network partition or a reboot from a
827 	 * server that doesn't support a grace period.
828 	 */
829 	list_for_each_entry(state, &sp->so_states, open_states) {
830 		if (state->state == 0)
831 			continue;
832 		status = ops->recover_open(sp, state);
833 		if (status >= 0) {
834 			status = nfs4_reclaim_locks(ops, state);
835 			if (status < 0)
836 				goto out_err;
837 			list_for_each_entry(lock, &state->lock_states, ls_locks) {
838 				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
839 					printk(KERN_WARNING "%s: Lock reclaim failed!\n",
840 							__FUNCTION__);
841 			}
842 			continue;
843 		}
844 		switch (status) {
845 			default:
846 				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
847 						__FUNCTION__, status);
848 			case -ENOENT:
849 			case -NFS4ERR_RECLAIM_BAD:
850 			case -NFS4ERR_RECLAIM_CONFLICT:
851 				/*
852 				 * Open state on this file cannot be recovered
853 				 * All we can do is revert to using the zero stateid.
854 				 */
855 				memset(state->stateid.data, 0,
856 					sizeof(state->stateid.data));
857 				/* Mark the file as being 'closed' */
858 				state->state = 0;
859 				break;
860 			case -NFS4ERR_EXPIRED:
861 			case -NFS4ERR_NO_GRACE:
862 			case -NFS4ERR_STALE_CLIENTID:
863 				goto out_err;
864 		}
865 	}
866 	return 0;
867 out_err:
868 	return status;
869 }
870 
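/*
 * Prepare all state for reclaim after the lease has been lost: reset every
 * open and lock seqid to zero and clear the delegation, open-mode and lock
 * state flags so that each stateid gets re-established with the server.
 */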
871 static void nfs4_state_mark_reclaim(struct nfs_client *clp)
872 {
873 	struct nfs4_state_owner *sp;
874 	struct rb_node *pos;
875 	struct nfs4_state *state;
876 	struct nfs4_lock_state *lock;
877 
878 	/* Reset all sequence ids to zero */
879 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
880 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
881 		sp->so_seqid.counter = 0;
882 		sp->so_seqid.flags = 0;
883 		spin_lock(&sp->so_lock);
884 		list_for_each_entry(state, &sp->so_states, open_states) {
885 			clear_bit(NFS_DELEGATED_STATE, &state->flags);
886 			clear_bit(NFS_O_RDONLY_STATE, &state->flags);
887 			clear_bit(NFS_O_WRONLY_STATE, &state->flags);
888 			clear_bit(NFS_O_RDWR_STATE, &state->flags);
889 			list_for_each_entry(lock, &state->lock_states, ls_locks) {
890 				lock->ls_seqid.counter = 0;
891 				lock->ls_seqid.flags = 0;
892 				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
893 			}
894 		}
895 		spin_unlock(&sp->so_lock);
896 	}
897 }
898 
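/*
 * The state recovery thread.  Runs with clp->cl_sem held for write: it
 * renews or re-establishes the client lease, reclaims all open and lock
 * state, and finally reaps any delegations that could not be reclaimed.
 */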
899 static int reclaimer(void *ptr)
900 {
901 	struct nfs_client *clp = ptr;
902 	struct nfs4_state_owner *sp;
903 	struct rb_node *pos;
904 	struct nfs4_state_recovery_ops *ops;
905 	struct rpc_cred *cred;
906 	int status = 0;
907 
908 	allow_signal(SIGKILL);
909 
910 	/* Ensure exclusive access to NFSv4 state */
911 	lock_kernel();
912 	down_write(&clp->cl_sem);
913 	/* Are there any NFS mounts out there? */
914 	if (list_empty(&clp->cl_superblocks))
915 		goto out;
916 restart_loop:
917 	ops = &nfs4_network_partition_recovery_ops;
918 	/* Are there any open files on this volume? */
919 	cred = nfs4_get_renew_cred(clp);
920 	if (cred != NULL) {
921 		/* Yes there are: try to renew the old lease */
922 		status = nfs4_proc_renew(clp, cred);
923 		switch (status) {
924 			case 0:
925 			case -NFS4ERR_CB_PATH_DOWN:
926 				put_rpccred(cred);
927 				goto out;
928 			case -NFS4ERR_STALE_CLIENTID:
929 			case -NFS4ERR_LEASE_MOVED:
930 				ops = &nfs4_reboot_recovery_ops;
931 		}
932 	} else {
933 		/* "reboot" to ensure we clear all state on the server */
934 		clp->cl_boot_time = CURRENT_TIME;
935 		cred = nfs4_get_setclientid_cred(clp);
936 	}
937 	/* We're going to have to re-establish a clientid */
938 	nfs4_state_mark_reclaim(clp);
939 	status = -ENOENT;
940 	if (cred != NULL) {
941 		status = nfs4_init_client(clp, cred);
942 		put_rpccred(cred);
943 	}
944 	if (status)
945 		goto out_error;
946 	/* Mark all delegations for reclaim */
947 	nfs_delegation_mark_reclaim(clp);
948 	/* Note: list is protected by exclusive lock on cl->cl_sem */
949 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
950 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
951 		status = nfs4_reclaim_open_state(ops, sp);
952 		if (status < 0) {
953 			if (status == -NFS4ERR_NO_GRACE) {
954 				ops = &nfs4_network_partition_recovery_ops;
955 				status = nfs4_reclaim_open_state(ops, sp);
956 			}
957 			if (status == -NFS4ERR_STALE_CLIENTID)
958 				goto restart_loop;
959 			if (status == -NFS4ERR_EXPIRED)
960 				goto restart_loop;
961 		}
962 	}
963 	nfs_delegation_reap_unclaimed(clp);
964 out:
965 	up_write(&clp->cl_sem);
966 	unlock_kernel();
967 	if (status == -NFS4ERR_CB_PATH_DOWN)
968 		nfs_handle_cb_pathdown(clp);
969 	nfs4_clear_recover_bit(clp);
970 	nfs_put_client(clp);
971 	module_put_and_exit(0);
972 	return 0;
973 out_error:
974 	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
975 			" with error %d\n", clp->cl_hostname, -status);
976 	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
977 	goto out;
978 }
979 
980 /*
981  * Local variables:
982  *  c-basic-offset: 8
983  * End:
984  */
985