xref: /openbmc/linux/fs/nfs/nfs4state.c (revision 87c2ce3b)
/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side state management for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

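/*
 * Cap on the number of unused state_owner structures cached on
 * clp->cl_unused; see nfs4_put_state_owner() below.
 */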
#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	kfree(server->mnt_path);
	server->mnt_path = NULL;
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
	return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

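/*
 * Find a client matching @addr, or install a freshly allocated one.
 *
 * Note the allocate-outside-the-lock pattern: nfs4_alloc_client() can
 * sleep (GFP_KERNEL allocation, callback startup), so we drop
 * state_spinlock, allocate, retake the lock and re-check for a racing
 * insertion.  The loser of the race frees its spare copy on the way out.
 */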
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}

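/*
 * Establish our clientid on the server: SETCLIENTID followed by
 * SETCLIENTID_CONFIRM, then start lease renewal on success.
 */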
static int nfs4_init_client(struct nfs4_client *clp, struct rpc_cred *cred)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
			nfs_callback_tcpport, cred);

	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

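/*
 * Hand out the next open/lock owner id.  Both call sites in this file
 * hold clp->cl_lock, which is what keeps the increment safe.
 */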
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

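/*
 * Pick a credential for RENEW calls: any state owner that still has
 * open state will do, since the lease only needs renewing while
 * someone holds state.  Returns NULL if no owner has open state.
 */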
struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rpc_cred *cred = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	if (!list_empty(&clp->cl_state_owners)) {
		sp = list_entry(clp->cl_state_owners.next,
				struct nfs4_state_owner, so_list);
		return get_rpccred(sp->so_cred);
	}
	return NULL;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

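/*
 * Unhash a state owner so it can no longer be found or recycled via
 * cl_unused.  Called from nfs_increment_open_seqid() when the server
 * returns NFS4ERR_BAD_SEQID, i.e. when our idea of the owner's
 * sequence is beyond repair.
 */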
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

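/*
 * Update state->state, keeping owner->so_states ordered so that
 * writable states sit at the head of the list and read-only ones at
 * the tail.  The reclaim code (nfs4_reclaim_open_state) relies on
 * recovering write opens first, to avoid being handed read delegations
 * it would immediately have to return.  Callers hold the locks that
 * protect the state; see nfs4_close_state() for an example.
 */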
void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why.  */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

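/*
 * Find or create the nfs4_state shared by @owner's opens of @inode.
 * Same optimistic-allocation scheme as nfs4_get_client(): allocate
 * before taking the locks, re-check under owner->so_lock and
 * inode->i_lock, and free the spare copy if someone beat us to it.
 */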
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file: drop this open's share access counts, work
 * out which access modes the file still needs, and let nfs4_do_close()
 * update the server if the resulting state has changed.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	oldstate = newstate = state->state;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0)
			newstate &= ~FMODE_READ;
		if (state->n_wronly == 0)
			newstate &= ~FMODE_WRITE;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);

	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Search state->lock_states for an existing lock_state that matches
 * the given lock owner (normally current->files).
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}


/*
 * Allocate and initialise a new lock_state for the given owner,
 * assigning it a fresh lockowner id under clp->cl_lock.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, allocate and return a new one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}


/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

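/*
 * file_lock_operations glue: keep the lock_state's refcount in step
 * with the VFS as it copies and destroys struct file_lock.
 */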
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

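/*
 * Attach a (possibly new) lock_state to the file_lock, and point
 * fl_ops at nfs4_fl_lock_ops so the reference travels with any
 * copies the VFS makes.
 */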
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

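/*
 * Seqid plumbing.  A rough sketch of the expected calling pattern
 * (illustrative only - the real callers live elsewhere, e.g. the
 * OPEN/CLOSE/LOCK paths):
 *
 *	seqid = nfs_alloc_seqid(&sp->so_seqid);
 *	if (nfs_wait_on_sequence(seqid, task) != 0)
 *		return;			(requeued until head of queue)
 *	...issue the stateful operation...
 *	nfs_increment_open_seqid(status, seqid);
 *	nfs_free_seqid(seqid);
 *
 * Only the nfs_seqid at the head of the rpc_sequence list may run,
 * which is what keeps seqids strictly ordered on the wire.
 */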
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	spin_unlock(&sequence->lock);
	rpc_wake_up(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see the comments in nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see the comments in nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}

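/*
 * Park the RPC task unless our seqid is at the head of the queue.
 * The first check is a lockless fast path; the result is confirmed
 * under sequence->lock before the task is actually put to sleep.
 */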
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
out:
	return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs4_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs4_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
			NIPQUAD(clp->cl_addr));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs4_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
				/* fall through to the unrecoverable cases */
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk(KERN_WARNING "%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
				/* fall through */
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

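/*
 * A new clientid means a new boot instance as far as the server is
 * concerned, so all open_owner and lock_owner seqids restart at zero
 * and any lock state must be re-established.
 */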
static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}

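/*
 * The recovery thread.  Under an exclusive hold on clp->cl_sem (so no
 * new state operations can race us) it first tries a RENEW; if the
 * lease is intact, or merely the callback path is down, we are done.
 * Otherwise it re-establishes the clientid and replays every open and
 * lock through the reboot or network-partition recovery ops.
 */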
static int reclaimer(void *ptr)
{
	struct nfs4_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	struct rpc_cred *cred;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	ops = &nfs4_network_partition_recovery_ops;
	/* Are there any open files on this volume? */
	cred = nfs4_get_renew_cred(clp);
	if (cred != NULL) {
		/* Yes there are: try to renew the old lease */
		status = nfs4_proc_renew(clp, cred);
		switch (status) {
			case 0:
			case -NFS4ERR_CB_PATH_DOWN:
				put_rpccred(cred);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_LEASE_MOVED:
				ops = &nfs4_reboot_recovery_ops;
		}
	} else {
		/* "reboot" to ensure we clear all state on the server */
		clp->cl_boot_time = CURRENT_TIME;
		cred = nfs4_get_setclientid_cred(clp);
	}
	/* We're going to have to re-establish a clientid */
	nfs4_state_mark_reclaim(clp);
	status = -ENOENT;
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
	}
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on clp->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	unlock_kernel();
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs4_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
				NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */