xref: /openbmc/linux/fs/nfsd/nfs4state.c (revision ddc141e5)
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49 
50 #include "netns.h"
51 #include "pnfs.h"
52 
53 #define NFSDDBG_FACILITY                NFSDDBG_PROC
54 
55 #define all_ones {{~0,~0},~0}
56 static const stateid_t one_stateid = {
57 	.si_generation = ~0,
58 	.si_opaque = all_ones,
59 };
60 static const stateid_t zero_stateid = {
61 	/* all fields zero */
62 };
63 static const stateid_t currentstateid = {
64 	.si_generation = 1,
65 };
66 static const stateid_t close_stateid = {
67 	.si_generation = 0xffffffffU,
68 };
69 
70 static u64 current_sessionid = 1;
71 
72 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
73 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
74 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
75 #define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
76 
77 /* forward declarations */
78 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
79 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
80 
81 /* Locking: */
82 
83 /*
84  * Currently used for the del_recall_lru and file hash table.  In an
85  * effort to decrease the scope of the client_mutex, this spinlock may
86  * eventually cover more:
87  */
88 static DEFINE_SPINLOCK(state_lock);
89 
90 enum nfsd4_st_mutex_lock_subclass {
91 	OPEN_STATEID_MUTEX = 0,
92 	LOCK_STATEID_MUTEX = 1,
93 };
94 
95 /*
96  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
97  * the refcount on the open stateid to drop.
98  */
99 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
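/*
 * Both nfs4_put_stid() and put_ol_stateid_locked() below do a
 * wake_up_all(&close_wq) whenever they drop a stateid reference that is
 * not the final one, so any CLOSE waiter can re-check its stateid.
 */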
100 
101 static struct kmem_cache *openowner_slab;
102 static struct kmem_cache *lockowner_slab;
103 static struct kmem_cache *file_slab;
104 static struct kmem_cache *stateid_slab;
105 static struct kmem_cache *deleg_slab;
106 static struct kmem_cache *odstate_slab;
107 
108 static void free_session(struct nfsd4_session *);
109 
110 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
111 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
112 
113 static bool is_session_dead(struct nfsd4_session *ses)
114 {
115 	return ses->se_flags & NFS4_SESSION_DEAD;
116 }
117 
118 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
119 {
120 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
121 		return nfserr_jukebox;
122 	ses->se_flags |= NFS4_SESSION_DEAD;
123 	return nfs_ok;
124 }
125 
126 static bool is_client_expired(struct nfs4_client *clp)
127 {
128 	return clp->cl_time == 0;
129 }
130 
131 static __be32 get_client_locked(struct nfs4_client *clp)
132 {
133 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
134 
135 	lockdep_assert_held(&nn->client_lock);
136 
137 	if (is_client_expired(clp))
138 		return nfserr_expired;
139 	atomic_inc(&clp->cl_refcount);
140 	return nfs_ok;
141 }
142 
143 /* must be called under the client_lock */
144 static inline void
145 renew_client_locked(struct nfs4_client *clp)
146 {
147 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
148 
149 	if (is_client_expired(clp)) {
150 		WARN_ON(1);
151 		printk("%s: client (clientid %08x/%08x) already expired\n",
152 			__func__,
153 			clp->cl_clientid.cl_boot,
154 			clp->cl_clientid.cl_id);
155 		return;
156 	}
157 
158 	dprintk("renewing client (clientid %08x/%08x)\n",
159 			clp->cl_clientid.cl_boot,
160 			clp->cl_clientid.cl_id);
161 	list_move_tail(&clp->cl_lru, &nn->client_lru);
162 	clp->cl_time = get_seconds();
163 }
164 
165 static void put_client_renew_locked(struct nfs4_client *clp)
166 {
167 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
168 
169 	lockdep_assert_held(&nn->client_lock);
170 
171 	if (!atomic_dec_and_test(&clp->cl_refcount))
172 		return;
173 	if (!is_client_expired(clp))
174 		renew_client_locked(clp);
175 }
176 
177 static void put_client_renew(struct nfs4_client *clp)
178 {
179 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
180 
181 	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
182 		return;
183 	if (!is_client_expired(clp))
184 		renew_client_locked(clp);
185 	spin_unlock(&nn->client_lock);
186 }
187 
188 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
189 {
190 	__be32 status;
191 
192 	if (is_session_dead(ses))
193 		return nfserr_badsession;
194 	status = get_client_locked(ses->se_client);
195 	if (status)
196 		return status;
197 	atomic_inc(&ses->se_ref);
198 	return nfs_ok;
199 }
200 
201 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
202 {
203 	struct nfs4_client *clp = ses->se_client;
204 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
205 
206 	lockdep_assert_held(&nn->client_lock);
207 
208 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
209 		free_session(ses);
210 	put_client_renew_locked(clp);
211 }
212 
213 static void nfsd4_put_session(struct nfsd4_session *ses)
214 {
215 	struct nfs4_client *clp = ses->se_client;
216 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
217 
218 	spin_lock(&nn->client_lock);
219 	nfsd4_put_session_locked(ses);
220 	spin_unlock(&nn->client_lock);
221 }
222 
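/*
 * Find a queued blocked-lock request for this lockowner/filehandle pair,
 * unlink it from the per-owner and LRU lists under blocked_locks_lock,
 * cancel the pending file_lock block, and hand the entry to the caller.
 */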
223 static struct nfsd4_blocked_lock *
224 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
225 			struct nfsd_net *nn)
226 {
227 	struct nfsd4_blocked_lock *cur, *found = NULL;
228 
229 	spin_lock(&nn->blocked_locks_lock);
230 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
231 		if (fh_match(fh, &cur->nbl_fh)) {
232 			list_del_init(&cur->nbl_list);
233 			list_del_init(&cur->nbl_lru);
234 			found = cur;
235 			break;
236 		}
237 	}
238 	spin_unlock(&nn->blocked_locks_lock);
239 	if (found)
240 		posix_unblock_lock(&found->nbl_lock);
241 	return found;
242 }
243 
244 static struct nfsd4_blocked_lock *
245 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
246 			struct nfsd_net *nn)
247 {
248 	struct nfsd4_blocked_lock *nbl;
249 
250 	nbl = find_blocked_lock(lo, fh, nn);
251 	if (!nbl) {
252 		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
253 		if (nbl) {
254 			fh_copy_shallow(&nbl->nbl_fh, fh);
255 			locks_init_lock(&nbl->nbl_lock);
256 			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
257 					&nfsd4_cb_notify_lock_ops,
258 					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
259 		}
260 	}
261 	return nbl;
262 }
263 
264 static void
265 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
266 {
267 	locks_release_private(&nbl->nbl_lock);
268 	kfree(nbl);
269 }
270 
271 static int
272 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
273 {
274 	/*
275 	 * Since this is just an optimization, we don't try very hard if it
276 	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
277 	 * just quit trying on anything else.
278 	 */
279 	switch (task->tk_status) {
280 	case -NFS4ERR_DELAY:
281 		rpc_delay(task, 1 * HZ);
282 		return 0;
283 	default:
284 		return 1;
285 	}
286 }
287 
288 static void
289 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
290 {
291 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
292 						struct nfsd4_blocked_lock, nbl_cb);
293 
294 	free_blocked_lock(nbl);
295 }
296 
297 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
298 	.done		= nfsd4_cb_notify_lock_done,
299 	.release	= nfsd4_cb_notify_lock_release,
300 };
301 
302 static inline struct nfs4_stateowner *
303 nfs4_get_stateowner(struct nfs4_stateowner *sop)
304 {
305 	atomic_inc(&sop->so_count);
306 	return sop;
307 }
308 
309 static int
310 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
311 {
312 	return (sop->so_owner.len == owner->len) &&
313 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
314 }
315 
316 static struct nfs4_openowner *
317 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
318 			struct nfs4_client *clp)
319 {
320 	struct nfs4_stateowner *so;
321 
322 	lockdep_assert_held(&clp->cl_lock);
323 
324 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
325 			    so_strhash) {
326 		if (!so->so_is_open_owner)
327 			continue;
328 		if (same_owner_str(so, &open->op_owner))
329 			return openowner(nfs4_get_stateowner(so));
330 	}
331 	return NULL;
332 }
333 
334 static struct nfs4_openowner *
335 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
336 			struct nfs4_client *clp)
337 {
338 	struct nfs4_openowner *oo;
339 
340 	spin_lock(&clp->cl_lock);
341 	oo = find_openstateowner_str_locked(hashval, open, clp);
342 	spin_unlock(&clp->cl_lock);
343 	return oo;
344 }
345 
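/*
 * Simple multiply-by-37 hash over an opaque byte string; used by
 * ownerstr_hashval() and clientstr_hashval() below.
 */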
346 static inline u32
347 opaque_hashval(const void *ptr, int nbytes)
348 {
349 	unsigned char *cptr = (unsigned char *) ptr;
350 
351 	u32 x = 0;
352 	while (nbytes--) {
353 		x *= 37;
354 		x += *cptr++;
355 	}
356 	return x;
357 }
358 
359 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
360 {
361 	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
362 
363 	kmem_cache_free(file_slab, fp);
364 }
365 
366 void
367 put_nfs4_file(struct nfs4_file *fi)
368 {
369 	might_lock(&state_lock);
370 
371 	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
372 		hlist_del_rcu(&fi->fi_hash);
373 		spin_unlock(&state_lock);
374 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
375 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
376 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
377 	}
378 }
379 
380 static struct file *
381 __nfs4_get_fd(struct nfs4_file *f, int oflag)
382 {
383 	if (f->fi_fds[oflag])
384 		return get_file(f->fi_fds[oflag]);
385 	return NULL;
386 }
387 
388 static struct file *
389 find_writeable_file_locked(struct nfs4_file *f)
390 {
391 	struct file *ret;
392 
393 	lockdep_assert_held(&f->fi_lock);
394 
395 	ret = __nfs4_get_fd(f, O_WRONLY);
396 	if (!ret)
397 		ret = __nfs4_get_fd(f, O_RDWR);
398 	return ret;
399 }
400 
401 static struct file *
402 find_writeable_file(struct nfs4_file *f)
403 {
404 	struct file *ret;
405 
406 	spin_lock(&f->fi_lock);
407 	ret = find_writeable_file_locked(f);
408 	spin_unlock(&f->fi_lock);
409 
410 	return ret;
411 }
412 
413 static struct file *find_readable_file_locked(struct nfs4_file *f)
414 {
415 	struct file *ret;
416 
417 	lockdep_assert_held(&f->fi_lock);
418 
419 	ret = __nfs4_get_fd(f, O_RDONLY);
420 	if (!ret)
421 		ret = __nfs4_get_fd(f, O_RDWR);
422 	return ret;
423 }
424 
425 static struct file *
426 find_readable_file(struct nfs4_file *f)
427 {
428 	struct file *ret;
429 
430 	spin_lock(&f->fi_lock);
431 	ret = find_readable_file_locked(f);
432 	spin_unlock(&f->fi_lock);
433 
434 	return ret;
435 }
436 
437 struct file *
438 find_any_file(struct nfs4_file *f)
439 {
440 	struct file *ret;
441 
442 	spin_lock(&f->fi_lock);
443 	ret = __nfs4_get_fd(f, O_RDWR);
444 	if (!ret) {
445 		ret = __nfs4_get_fd(f, O_WRONLY);
446 		if (!ret)
447 			ret = __nfs4_get_fd(f, O_RDONLY);
448 	}
449 	spin_unlock(&f->fi_lock);
450 	return ret;
451 }
452 
453 static atomic_long_t num_delegations;
454 unsigned long max_delegations;
455 
456 /*
457  * Open owner state (share locks)
458  */
459 
460 /* hash tables for lock and open owners */
461 #define OWNER_HASH_BITS              8
462 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
463 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
464 
465 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
466 {
467 	unsigned int ret;
468 
469 	ret = opaque_hashval(ownername->data, ownername->len);
470 	return ret & OWNER_HASH_MASK;
471 }
472 
473 /* hash table for nfs4_file */
474 #define FILE_HASH_BITS                   8
475 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
476 
477 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
478 {
479 	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
480 }
481 
482 static unsigned int file_hashval(struct knfsd_fh *fh)
483 {
484 	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
485 }
486 
487 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
488 
489 static void
490 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
491 {
492 	lockdep_assert_held(&fp->fi_lock);
493 
494 	if (access & NFS4_SHARE_ACCESS_WRITE)
495 		atomic_inc(&fp->fi_access[O_WRONLY]);
496 	if (access & NFS4_SHARE_ACCESS_READ)
497 		atomic_inc(&fp->fi_access[O_RDONLY]);
498 }
499 
500 static __be32
501 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
502 {
503 	lockdep_assert_held(&fp->fi_lock);
504 
505 	/* Does this access mode make sense? */
506 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
507 		return nfserr_inval;
508 
509 	/* Does it conflict with a deny mode already set? */
510 	if ((access & fp->fi_share_deny) != 0)
511 		return nfserr_share_denied;
512 
513 	__nfs4_file_get_access(fp, access);
514 	return nfs_ok;
515 }
516 
517 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
518 {
519 	/* Common case is that there is no deny mode. */
520 	if (deny) {
521 		/* Does this deny mode make sense? */
522 		if (deny & ~NFS4_SHARE_DENY_BOTH)
523 			return nfserr_inval;
524 
525 		if ((deny & NFS4_SHARE_DENY_READ) &&
526 		    atomic_read(&fp->fi_access[O_RDONLY]))
527 			return nfserr_share_denied;
528 
529 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
530 		    atomic_read(&fp->fi_access[O_WRONLY]))
531 			return nfserr_share_denied;
532 	}
533 	return nfs_ok;
534 }
535 
536 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
537 {
538 	might_lock(&fp->fi_lock);
539 
540 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
541 		struct file *f1 = NULL;
542 		struct file *f2 = NULL;
543 
544 		swap(f1, fp->fi_fds[oflag]);
545 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
546 			swap(f2, fp->fi_fds[O_RDWR]);
547 		spin_unlock(&fp->fi_lock);
548 		if (f1)
549 			fput(f1);
550 		if (f2)
551 			fput(f2);
552 	}
553 }
554 
555 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
556 {
557 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
558 
559 	if (access & NFS4_SHARE_ACCESS_WRITE)
560 		__nfs4_file_put_access(fp, O_WRONLY);
561 	if (access & NFS4_SHARE_ACCESS_READ)
562 		__nfs4_file_put_access(fp, O_RDONLY);
563 }
564 
565 /*
566  * Allocate a new open/delegation state counter. This is needed for
567  * pNFS for proper return on close semantics.
568  *
569  * Note that we only allocate it for pNFS-enabled exports, otherwise
570  * all pointers to struct nfs4_clnt_odstate are always NULL.
571  */
572 static struct nfs4_clnt_odstate *
573 alloc_clnt_odstate(struct nfs4_client *clp)
574 {
575 	struct nfs4_clnt_odstate *co;
576 
577 	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
578 	if (co) {
579 		co->co_client = clp;
580 		refcount_set(&co->co_odcount, 1);
581 	}
582 	return co;
583 }
584 
585 static void
586 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
587 {
588 	struct nfs4_file *fp = co->co_file;
589 
590 	lockdep_assert_held(&fp->fi_lock);
591 	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
592 }
593 
594 static inline void
595 get_clnt_odstate(struct nfs4_clnt_odstate *co)
596 {
597 	if (co)
598 		refcount_inc(&co->co_odcount);
599 }
600 
601 static void
602 put_clnt_odstate(struct nfs4_clnt_odstate *co)
603 {
604 	struct nfs4_file *fp;
605 
606 	if (!co)
607 		return;
608 
609 	fp = co->co_file;
610 	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
611 		list_del(&co->co_perfile);
612 		spin_unlock(&fp->fi_lock);
613 
614 		nfsd4_return_all_file_layouts(co->co_client, fp);
615 		kmem_cache_free(odstate_slab, co);
616 	}
617 }
618 
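/*
 * Return the odstate to use for this client on this file: take a
 * reference on an already-hashed entry if the client has one, otherwise
 * hash and return the caller-supplied 'new' entry.
 */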
619 static struct nfs4_clnt_odstate *
620 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
621 {
622 	struct nfs4_clnt_odstate *co;
623 	struct nfs4_client *cl;
624 
625 	if (!new)
626 		return NULL;
627 
628 	cl = new->co_client;
629 
630 	spin_lock(&fp->fi_lock);
631 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
632 		if (co->co_client == cl) {
633 			get_clnt_odstate(co);
634 			goto out;
635 		}
636 	}
637 	co = new;
638 	co->co_file = fp;
639 	hash_clnt_odstate_locked(new);
640 out:
641 	spin_unlock(&fp->fi_lock);
642 	return co;
643 }
644 
645 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
646 				  void (*sc_free)(struct nfs4_stid *))
647 {
648 	struct nfs4_stid *stid;
649 	int new_id;
650 
651 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
652 	if (!stid)
653 		return NULL;
654 
655 	idr_preload(GFP_KERNEL);
656 	spin_lock(&cl->cl_lock);
657 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
658 	spin_unlock(&cl->cl_lock);
659 	idr_preload_end();
660 	if (new_id < 0)
661 		goto out_free;
662 
663 	stid->sc_free = sc_free;
664 	stid->sc_client = cl;
665 	stid->sc_stateid.si_opaque.so_id = new_id;
666 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
667 	/* Will be incremented before return to client: */
668 	refcount_set(&stid->sc_count, 1);
669 	spin_lock_init(&stid->sc_lock);
670 
671 	/*
672 	 * It shouldn't be a problem to reuse an opaque stateid value.
673 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
674 	 * example, a stray write retransmission could be accepted by
675 	 * the server when it should have been rejected.  Therefore,
676 	 * adopt a trick from the sctp code to attempt to maximize the
677 	 * amount of time until an id is reused, by ensuring they always
678 	 * "increase" (mod INT_MAX):
679 	 */
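	/*
	 * (The idr_alloc_cyclic() call above is what provides that
	 * monotonically increasing id.)
	 */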
680 	return stid;
681 out_free:
682 	kmem_cache_free(slab, stid);
683 	return NULL;
684 }
685 
686 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
687 {
688 	struct nfs4_stid *stid;
689 
690 	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
691 	if (!stid)
692 		return NULL;
693 
694 	return openlockstateid(stid);
695 }
696 
697 static void nfs4_free_deleg(struct nfs4_stid *stid)
698 {
699 	kmem_cache_free(deleg_slab, stid);
700 	atomic_long_dec(&num_delegations);
701 }
702 
703 /*
704  * When we recall a delegation, we should be careful not to hand it
705  * out again straight away.
706  * To ensure this we keep a pair of bloom filters ('new' and 'old')
707  * in which the filehandles of recalled delegations are "stored".
708  * If a filehandle appears in either filter, a delegation is blocked.
709  * When a delegation is recalled, the filehandle is stored in the "new"
710  * filter.
711  * Every 30 seconds we swap the filters and clear the "new" one,
712  * unless both are empty of course.
713  *
714  * Each filter is 256 bits.  We hash the filehandle to a 32-bit value and
715  * use its low 3 bytes as bit indices into the filter.
716  *
717  * 'blocked_delegations_lock', which is always taken in block_delegations(),
718  * is used to manage concurrent access.  Testing does not need the lock
719  * except when swapping the two filters.
720  */
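/*
 * Illustration, matching delegation_blocked() and block_delegations()
 * below: a filehandle hashing to 0x00a1b2c3 tests/sets filter bits
 * 0xc3, 0xb2 and 0xa1, i.e. (hash & 255), ((hash >> 8) & 255) and
 * ((hash >> 16) & 255).
 */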
721 static DEFINE_SPINLOCK(blocked_delegations_lock);
722 static struct bloom_pair {
723 	int	entries, old_entries;
724 	time_t	swap_time;
725 	int	new; /* index into 'set' */
726 	DECLARE_BITMAP(set[2], 256);
727 } blocked_delegations;
728 
729 static int delegation_blocked(struct knfsd_fh *fh)
730 {
731 	u32 hash;
732 	struct bloom_pair *bd = &blocked_delegations;
733 
734 	if (bd->entries == 0)
735 		return 0;
736 	if (seconds_since_boot() - bd->swap_time > 30) {
737 		spin_lock(&blocked_delegations_lock);
738 		if (seconds_since_boot() - bd->swap_time > 30) {
739 			bd->entries -= bd->old_entries;
740 			bd->old_entries = bd->entries;
741 			memset(bd->set[bd->new], 0,
742 			       sizeof(bd->set[0]));
743 			bd->new = 1 - bd->new;
744 			bd->swap_time = seconds_since_boot();
745 		}
746 		spin_unlock(&blocked_delegations_lock);
747 	}
748 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
749 	if (test_bit(hash&255, bd->set[0]) &&
750 	    test_bit((hash>>8)&255, bd->set[0]) &&
751 	    test_bit((hash>>16)&255, bd->set[0]))
752 		return 1;
753 
754 	if (test_bit(hash&255, bd->set[1]) &&
755 	    test_bit((hash>>8)&255, bd->set[1]) &&
756 	    test_bit((hash>>16)&255, bd->set[1]))
757 		return 1;
758 
759 	return 0;
760 }
761 
762 static void block_delegations(struct knfsd_fh *fh)
763 {
764 	u32 hash;
765 	struct bloom_pair *bd = &blocked_delegations;
766 
767 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
768 
769 	spin_lock(&blocked_delegations_lock);
770 	__set_bit(hash&255, bd->set[bd->new]);
771 	__set_bit((hash>>8)&255, bd->set[bd->new]);
772 	__set_bit((hash>>16)&255, bd->set[bd->new]);
773 	if (bd->entries == 0)
774 		bd->swap_time = seconds_since_boot();
775 	bd->entries += 1;
776 	spin_unlock(&blocked_delegations_lock);
777 }
778 
779 static struct nfs4_delegation *
780 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
781 		 struct nfs4_clnt_odstate *odstate)
782 {
783 	struct nfs4_delegation *dp;
784 	long n;
785 
786 	dprintk("NFSD alloc_init_deleg\n");
787 	n = atomic_long_inc_return(&num_delegations);
788 	if (n < 0 || n > max_delegations)
789 		goto out_dec;
790 	if (delegation_blocked(&current_fh->fh_handle))
791 		goto out_dec;
792 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
793 	if (dp == NULL)
794 		goto out_dec;
795 
796 	/*
797 	 * delegation seqid's are never incremented.  The 4.1 special
798 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
799 	 * 0 anyway just for consistency and use 1:
800 	 */
801 	dp->dl_stid.sc_stateid.si_generation = 1;
802 	INIT_LIST_HEAD(&dp->dl_perfile);
803 	INIT_LIST_HEAD(&dp->dl_perclnt);
804 	INIT_LIST_HEAD(&dp->dl_recall_lru);
805 	dp->dl_clnt_odstate = odstate;
806 	get_clnt_odstate(odstate);
807 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
808 	dp->dl_retries = 1;
809 	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
810 		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
811 	return dp;
812 out_dec:
813 	atomic_long_dec(&num_delegations);
814 	return NULL;
815 }
816 
817 void
818 nfs4_put_stid(struct nfs4_stid *s)
819 {
820 	struct nfs4_file *fp = s->sc_file;
821 	struct nfs4_client *clp = s->sc_client;
822 
823 	might_lock(&clp->cl_lock);
824 
825 	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
826 		wake_up_all(&close_wq);
827 		return;
828 	}
829 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
830 	spin_unlock(&clp->cl_lock);
831 	s->sc_free(s);
832 	if (fp)
833 		put_nfs4_file(fp);
834 }
835 
836 void
837 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
838 {
839 	stateid_t *src = &stid->sc_stateid;
840 
841 	spin_lock(&stid->sc_lock);
842 	if (unlikely(++src->si_generation == 0))
843 		src->si_generation = 1;
844 	memcpy(dst, src, sizeof(*dst));
845 	spin_unlock(&stid->sc_lock);
846 }
847 
848 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
849 {
850 	struct file *filp = NULL;
851 
852 	spin_lock(&fp->fi_lock);
853 	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
854 		swap(filp, fp->fi_deleg_file);
855 	spin_unlock(&fp->fi_lock);
856 
857 	if (filp) {
858 		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
859 		fput(filp);
860 	}
861 }
862 
863 void nfs4_unhash_stid(struct nfs4_stid *s)
864 {
865 	s->sc_type = 0;
866 }
867 
868 /**
869  * nfs4_get_existing_delegation - Discover if this delegation already exists
870  * @clp:     a pointer to the nfs4_client we're granting a delegation to
871  * @fp:      a pointer to the nfs4_file we're granting a delegation on
872  *
873  * Return:
874  *      On success: 0 if an existing delegation was not found.
875  *
876  *      On error: -EAGAIN if one was previously granted to this nfs4_client
877  *                 for this nfs4_file.
878  *
879  */
880 
881 static int
882 nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
883 {
884 	struct nfs4_delegation *searchdp = NULL;
885 	struct nfs4_client *searchclp = NULL;
886 
887 	lockdep_assert_held(&state_lock);
888 	lockdep_assert_held(&fp->fi_lock);
889 
890 	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
891 		searchclp = searchdp->dl_stid.sc_client;
892 		if (clp == searchclp) {
893 			return -EAGAIN;
894 		}
895 	}
896 	return 0;
897 }
898 
899 /**
900  * hash_delegation_locked - Add a delegation to the appropriate lists
901  * @dp:     a pointer to the nfs4_delegation we are adding.
902  * @fp:     a pointer to the nfs4_file we're granting a delegation on
903  *
904  * Return:
905  *      On success: 0 if the delegation was successfully hashed.
906  *
907  *      On error: -EAGAIN if one was previously granted to this
908  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
909  *
910  */
911 
912 static int
913 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
914 {
915 	int status;
916 	struct nfs4_client *clp = dp->dl_stid.sc_client;
917 
918 	lockdep_assert_held(&state_lock);
919 	lockdep_assert_held(&fp->fi_lock);
920 
921 	status = nfs4_get_existing_delegation(clp, fp);
922 	if (status)
923 		return status;
924 	++fp->fi_delegees;
925 	refcount_inc(&dp->dl_stid.sc_count);
926 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
927 	list_add(&dp->dl_perfile, &fp->fi_delegations);
928 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
929 	return 0;
930 }
931 
932 static bool
933 unhash_delegation_locked(struct nfs4_delegation *dp)
934 {
935 	struct nfs4_file *fp = dp->dl_stid.sc_file;
936 
937 	lockdep_assert_held(&state_lock);
938 
939 	if (list_empty(&dp->dl_perfile))
940 		return false;
941 
942 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
943 	/* Ensure that deleg break won't try to requeue it */
944 	++dp->dl_time;
945 	spin_lock(&fp->fi_lock);
946 	list_del_init(&dp->dl_perclnt);
947 	list_del_init(&dp->dl_recall_lru);
948 	list_del_init(&dp->dl_perfile);
949 	spin_unlock(&fp->fi_lock);
950 	return true;
951 }
952 
953 static void destroy_delegation(struct nfs4_delegation *dp)
954 {
955 	bool unhashed;
956 
957 	spin_lock(&state_lock);
958 	unhashed = unhash_delegation_locked(dp);
959 	spin_unlock(&state_lock);
960 	if (unhashed) {
961 		put_clnt_odstate(dp->dl_clnt_odstate);
962 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
963 		nfs4_put_stid(&dp->dl_stid);
964 	}
965 }
966 
967 static void revoke_delegation(struct nfs4_delegation *dp)
968 {
969 	struct nfs4_client *clp = dp->dl_stid.sc_client;
970 
971 	WARN_ON(!list_empty(&dp->dl_recall_lru));
972 
973 	put_clnt_odstate(dp->dl_clnt_odstate);
974 	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
975 
976 	if (clp->cl_minorversion == 0)
977 		nfs4_put_stid(&dp->dl_stid);
978 	else {
979 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
980 		spin_lock(&clp->cl_lock);
981 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
982 		spin_unlock(&clp->cl_lock);
983 	}
984 }
985 
986 /*
987  * SETCLIENTID state
988  */
989 
990 static unsigned int clientid_hashval(u32 id)
991 {
992 	return id & CLIENT_HASH_MASK;
993 }
994 
995 static unsigned int clientstr_hashval(const char *name)
996 {
997 	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
998 }
999 
1000 /*
1001  * We store the NONE, READ, WRITE, and BOTH bits separately in the
1002  * st_{access,deny}_bmap field of the stateid, in order to track not
1003  * only what share bits are currently in force, but also what
1004  * combinations of share bits previous opens have used.  This allows us
1005  * to enforce the recommendation of rfc 3530 14.2.19 that the server
1006  * return an error if the client attempts to downgrade to a combination
1007  * of share bits not explicable by closing some of its previous opens.
1008  *
1009  * XXX: This enforcement is actually incomplete, since we don't keep
1010  * track of access/deny bit combinations; so, e.g., we allow:
1011  *
1012  *	OPEN allow read, deny write
1013  *	OPEN allow both, deny none
1014  *	DOWNGRADE allow read, deny none
1015  *
1016  * which we should reject.
1017  */
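/*
 * Example: a stateid whose st_access_bmap has bits 1 (READ) and 2 (WRITE)
 * set is reported by bmap_to_share_mode() below as 1 | 2 == 3, i.e.
 * NFS4_SHARE_ACCESS_BOTH.
 */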
1018 static unsigned int
1019 bmap_to_share_mode(unsigned long bmap) {
1020 	int i;
1021 	unsigned int access = 0;
1022 
1023 	for (i = 1; i < 4; i++) {
1024 		if (test_bit(i, &bmap))
1025 			access |= i;
1026 	}
1027 	return access;
1028 }
1029 
1030 /* set share access for a given stateid */
1031 static inline void
1032 set_access(u32 access, struct nfs4_ol_stateid *stp)
1033 {
1034 	unsigned char mask = 1 << access;
1035 
1036 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1037 	stp->st_access_bmap |= mask;
1038 }
1039 
1040 /* clear share access for a given stateid */
1041 static inline void
1042 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1043 {
1044 	unsigned char mask = 1 << access;
1045 
1046 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1047 	stp->st_access_bmap &= ~mask;
1048 }
1049 
1050 /* test whether a given stateid has access */
1051 static inline bool
1052 test_access(u32 access, struct nfs4_ol_stateid *stp)
1053 {
1054 	unsigned char mask = 1 << access;
1055 
1056 	return (bool)(stp->st_access_bmap & mask);
1057 }
1058 
1059 /* set share deny for a given stateid */
1060 static inline void
1061 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1062 {
1063 	unsigned char mask = 1 << deny;
1064 
1065 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1066 	stp->st_deny_bmap |= mask;
1067 }
1068 
1069 /* clear share deny for a given stateid */
1070 static inline void
1071 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1072 {
1073 	unsigned char mask = 1 << deny;
1074 
1075 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1076 	stp->st_deny_bmap &= ~mask;
1077 }
1078 
1079 /* test whether a given stateid is denying specific access */
1080 static inline bool
1081 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1082 {
1083 	unsigned char mask = 1 << deny;
1084 
1085 	return (bool)(stp->st_deny_bmap & mask);
1086 }
1087 
1088 static int nfs4_access_to_omode(u32 access)
1089 {
1090 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
1091 	case NFS4_SHARE_ACCESS_READ:
1092 		return O_RDONLY;
1093 	case NFS4_SHARE_ACCESS_WRITE:
1094 		return O_WRONLY;
1095 	case NFS4_SHARE_ACCESS_BOTH:
1096 		return O_RDWR;
1097 	}
1098 	WARN_ON_ONCE(1);
1099 	return O_RDONLY;
1100 }
1101 
1102 /*
1103  * A stateid that had a deny mode associated with it is being released
1104  * or downgraded. Recalculate the deny mode on the file.
1105  */
1106 static void
1107 recalculate_deny_mode(struct nfs4_file *fp)
1108 {
1109 	struct nfs4_ol_stateid *stp;
1110 
1111 	spin_lock(&fp->fi_lock);
1112 	fp->fi_share_deny = 0;
1113 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1114 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1115 	spin_unlock(&fp->fi_lock);
1116 }
1117 
1118 static void
1119 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1120 {
1121 	int i;
1122 	bool change = false;
1123 
1124 	for (i = 1; i < 4; i++) {
1125 		if ((i & deny) != i) {
1126 			change = true;
1127 			clear_deny(i, stp);
1128 		}
1129 	}
1130 
1131 	/* Recalculate per-file deny mode if there was a change */
1132 	if (change)
1133 		recalculate_deny_mode(stp->st_stid.sc_file);
1134 }
1135 
1136 /* release all access and file references for a given stateid */
1137 static void
1138 release_all_access(struct nfs4_ol_stateid *stp)
1139 {
1140 	int i;
1141 	struct nfs4_file *fp = stp->st_stid.sc_file;
1142 
1143 	if (fp && stp->st_deny_bmap != 0)
1144 		recalculate_deny_mode(fp);
1145 
1146 	for (i = 1; i < 4; i++) {
1147 		if (test_access(i, stp))
1148 			nfs4_file_put_access(stp->st_stid.sc_file, i);
1149 		clear_access(i, stp);
1150 	}
1151 }
1152 
1153 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1154 {
1155 	kfree(sop->so_owner.data);
1156 	sop->so_ops->so_free(sop);
1157 }
1158 
1159 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1160 {
1161 	struct nfs4_client *clp = sop->so_client;
1162 
1163 	might_lock(&clp->cl_lock);
1164 
1165 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1166 		return;
1167 	sop->so_ops->so_unhash(sop);
1168 	spin_unlock(&clp->cl_lock);
1169 	nfs4_free_stateowner(sop);
1170 }
1171 
1172 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1173 {
1174 	struct nfs4_file *fp = stp->st_stid.sc_file;
1175 
1176 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1177 
1178 	if (list_empty(&stp->st_perfile))
1179 		return false;
1180 
1181 	spin_lock(&fp->fi_lock);
1182 	list_del_init(&stp->st_perfile);
1183 	spin_unlock(&fp->fi_lock);
1184 	list_del(&stp->st_perstateowner);
1185 	return true;
1186 }
1187 
1188 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1189 {
1190 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1191 
1192 	put_clnt_odstate(stp->st_clnt_odstate);
1193 	release_all_access(stp);
1194 	if (stp->st_stateowner)
1195 		nfs4_put_stateowner(stp->st_stateowner);
1196 	kmem_cache_free(stateid_slab, stid);
1197 }
1198 
1199 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1200 {
1201 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1202 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1203 	struct file *file;
1204 
1205 	file = find_any_file(stp->st_stid.sc_file);
1206 	if (file)
1207 		filp_close(file, (fl_owner_t)lo);
1208 	nfs4_free_ol_stateid(stid);
1209 }
1210 
1211 /*
1212  * Put the persistent reference to an already unhashed generic stateid, while
1213  * holding the cl_lock. If it's the last reference, then put it onto the
1214  * reaplist for later destruction.
1215  */
1216 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1217 				       struct list_head *reaplist)
1218 {
1219 	struct nfs4_stid *s = &stp->st_stid;
1220 	struct nfs4_client *clp = s->sc_client;
1221 
1222 	lockdep_assert_held(&clp->cl_lock);
1223 
1224 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1225 
1226 	if (!refcount_dec_and_test(&s->sc_count)) {
1227 		wake_up_all(&close_wq);
1228 		return;
1229 	}
1230 
1231 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1232 	list_add(&stp->st_locks, reaplist);
1233 }
1234 
1235 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1236 {
1237 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1238 
1239 	list_del_init(&stp->st_locks);
1240 	nfs4_unhash_stid(&stp->st_stid);
1241 	return unhash_ol_stateid(stp);
1242 }
1243 
1244 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1245 {
1246 	struct nfs4_client *clp = stp->st_stid.sc_client;
1247 	bool unhashed;
1248 
1249 	spin_lock(&clp->cl_lock);
1250 	unhashed = unhash_lock_stateid(stp);
1251 	spin_unlock(&clp->cl_lock);
1252 	if (unhashed)
1253 		nfs4_put_stid(&stp->st_stid);
1254 }
1255 
1256 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1257 {
1258 	struct nfs4_client *clp = lo->lo_owner.so_client;
1259 
1260 	lockdep_assert_held(&clp->cl_lock);
1261 
1262 	list_del_init(&lo->lo_owner.so_strhash);
1263 }
1264 
1265 /*
1266  * Free a list of generic stateids that were collected earlier after being
1267  * fully unhashed.
1268  */
1269 static void
1270 free_ol_stateid_reaplist(struct list_head *reaplist)
1271 {
1272 	struct nfs4_ol_stateid *stp;
1273 	struct nfs4_file *fp;
1274 
1275 	might_sleep();
1276 
1277 	while (!list_empty(reaplist)) {
1278 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1279 				       st_locks);
1280 		list_del(&stp->st_locks);
1281 		fp = stp->st_stid.sc_file;
1282 		stp->st_stid.sc_free(&stp->st_stid);
1283 		if (fp)
1284 			put_nfs4_file(fp);
1285 	}
1286 }
1287 
1288 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1289 				       struct list_head *reaplist)
1290 {
1291 	struct nfs4_ol_stateid *stp;
1292 
1293 	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1294 
1295 	while (!list_empty(&open_stp->st_locks)) {
1296 		stp = list_entry(open_stp->st_locks.next,
1297 				struct nfs4_ol_stateid, st_locks);
1298 		WARN_ON(!unhash_lock_stateid(stp));
1299 		put_ol_stateid_locked(stp, reaplist);
1300 	}
1301 }
1302 
1303 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1304 				struct list_head *reaplist)
1305 {
1306 	bool unhashed;
1307 
1308 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1309 
1310 	unhashed = unhash_ol_stateid(stp);
1311 	release_open_stateid_locks(stp, reaplist);
1312 	return unhashed;
1313 }
1314 
1315 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1316 {
1317 	LIST_HEAD(reaplist);
1318 
1319 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1320 	if (unhash_open_stateid(stp, &reaplist))
1321 		put_ol_stateid_locked(stp, &reaplist);
1322 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1323 	free_ol_stateid_reaplist(&reaplist);
1324 }
1325 
1326 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1327 {
1328 	struct nfs4_client *clp = oo->oo_owner.so_client;
1329 
1330 	lockdep_assert_held(&clp->cl_lock);
1331 
1332 	list_del_init(&oo->oo_owner.so_strhash);
1333 	list_del_init(&oo->oo_perclient);
1334 }
1335 
1336 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1337 {
1338 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1339 					  nfsd_net_id);
1340 	struct nfs4_ol_stateid *s;
1341 
1342 	spin_lock(&nn->client_lock);
1343 	s = oo->oo_last_closed_stid;
1344 	if (s) {
1345 		list_del_init(&oo->oo_close_lru);
1346 		oo->oo_last_closed_stid = NULL;
1347 	}
1348 	spin_unlock(&nn->client_lock);
1349 	if (s)
1350 		nfs4_put_stid(&s->st_stid);
1351 }
1352 
1353 static void release_openowner(struct nfs4_openowner *oo)
1354 {
1355 	struct nfs4_ol_stateid *stp;
1356 	struct nfs4_client *clp = oo->oo_owner.so_client;
1357 	struct list_head reaplist;
1358 
1359 	INIT_LIST_HEAD(&reaplist);
1360 
1361 	spin_lock(&clp->cl_lock);
1362 	unhash_openowner_locked(oo);
1363 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1364 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1365 				struct nfs4_ol_stateid, st_perstateowner);
1366 		if (unhash_open_stateid(stp, &reaplist))
1367 			put_ol_stateid_locked(stp, &reaplist);
1368 	}
1369 	spin_unlock(&clp->cl_lock);
1370 	free_ol_stateid_reaplist(&reaplist);
1371 	release_last_closed_stateid(oo);
1372 	nfs4_put_stateowner(&oo->oo_owner);
1373 }
1374 
1375 static inline int
1376 hash_sessionid(struct nfs4_sessionid *sessionid)
1377 {
1378 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1379 
1380 	return sid->sequence % SESSION_HASH_SIZE;
1381 }
1382 
1383 #ifdef CONFIG_SUNRPC_DEBUG
1384 static inline void
1385 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1386 {
1387 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1388 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1389 }
1390 #else
1391 static inline void
1392 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1393 {
1394 }
1395 #endif
1396 
1397 /*
1398  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1399  * won't be used for replay.
1400  */
1401 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1402 {
1403 	struct nfs4_stateowner *so = cstate->replay_owner;
1404 
1405 	if (nfserr == nfserr_replay_me)
1406 		return;
1407 
1408 	if (!seqid_mutating_err(ntohl(nfserr))) {
1409 		nfsd4_cstate_clear_replay(cstate);
1410 		return;
1411 	}
1412 	if (!so)
1413 		return;
1414 	if (so->so_is_open_owner)
1415 		release_last_closed_stateid(openowner(so));
1416 	so->so_seqid++;
1417 	return;
1418 }
1419 
1420 static void
1421 gen_sessionid(struct nfsd4_session *ses)
1422 {
1423 	struct nfs4_client *clp = ses->se_client;
1424 	struct nfsd4_sessionid *sid;
1425 
1426 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1427 	sid->clientid = clp->cl_clientid;
1428 	sid->sequence = current_sessionid++;
1429 	sid->reserved = 0;
1430 }
1431 
1432 /*
1433  * The protocol defines ca_maxresponsesize_cached to include the size of
1434  * the rpc header, but all we need to cache is the data starting after
1435  * the end of the initial SEQUENCE operation--the rest we regenerate
1436  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1437  * value that is the number of bytes in our cache plus a few additional
1438  * bytes.  In order to stay on the safe side, and not promise more than
1439  * we can cache, those additional bytes must be the minimum possible: 24
1440  * bytes of rpc header (xid through accept state, with AUTH_NULL
1441  * verifier), 12 for the compound header (with zero-length tag), and 44
1442  * for the SEQUENCE op response:
1443  */
1444 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
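/*
 * That is 80 bytes of per-reply overhead, which slot_bytes() below
 * subtracts from a channel's maxresp_cached when sizing each slot's
 * reply cache.
 */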
1445 
1446 static void
1447 free_session_slots(struct nfsd4_session *ses)
1448 {
1449 	int i;
1450 
1451 	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1452 		free_svc_cred(&ses->se_slots[i]->sl_cred);
1453 		kfree(ses->se_slots[i]);
1454 	}
1455 }
1456 
1457 /*
1458  * We don't actually need to cache the rpc and session headers, so we
1459  * can allocate a little less for each slot:
1460  */
1461 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1462 {
1463 	u32 size;
1464 
1465 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1466 		size = 0;
1467 	else
1468 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1469 	return size + sizeof(struct nfsd4_slot);
1470 }
1471 
1472 /*
1473  * XXX: If we run out of reserved DRC memory we could (up to a point)
1474  * re-negotiate active sessions and reduce their slot usage to make
1475  * room for new connections. For now we just fail the create session.
1476  */
1477 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1478 {
1479 	u32 slotsize = slot_bytes(ca);
1480 	u32 num = ca->maxreqs;
1481 	int avail;
1482 
1483 	spin_lock(&nfsd_drc_lock);
1484 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1485 		    nfsd_drc_max_mem - nfsd_drc_mem_used);
1486 	/*
1487 	 * Never use more than a third of the remaining memory,
1488 	 * unless it's the only way to give this client a slot:
1489 	 */
1490 	avail = clamp_t(int, avail, slotsize, avail/3);
1491 	num = min_t(int, num, avail / slotsize);
1492 	nfsd_drc_mem_used += num * slotsize;
1493 	spin_unlock(&nfsd_drc_lock);
1494 
1495 	return num;
1496 }
1497 
1498 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1499 {
1500 	int slotsize = slot_bytes(ca);
1501 
1502 	spin_lock(&nfsd_drc_lock);
1503 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1504 	spin_unlock(&nfsd_drc_lock);
1505 }
1506 
1507 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1508 					   struct nfsd4_channel_attrs *battrs)
1509 {
1510 	int numslots = fattrs->maxreqs;
1511 	int slotsize = slot_bytes(fattrs);
1512 	struct nfsd4_session *new;
1513 	int mem, i;
1514 
1515 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1516 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1517 	mem = numslots * sizeof(struct nfsd4_slot *);
1518 
1519 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1520 	if (!new)
1521 		return NULL;
1522 	/* allocate each struct nfsd4_slot and data cache in one piece */
1523 	for (i = 0; i < numslots; i++) {
1524 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1525 		if (!new->se_slots[i])
1526 			goto out_free;
1527 	}
1528 
1529 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1530 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1531 
1532 	return new;
1533 out_free:
1534 	while (i--)
1535 		kfree(new->se_slots[i]);
1536 	kfree(new);
1537 	return NULL;
1538 }
1539 
1540 static void free_conn(struct nfsd4_conn *c)
1541 {
1542 	svc_xprt_put(c->cn_xprt);
1543 	kfree(c);
1544 }
1545 
1546 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1547 {
1548 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1549 	struct nfs4_client *clp = c->cn_session->se_client;
1550 
1551 	spin_lock(&clp->cl_lock);
1552 	if (!list_empty(&c->cn_persession)) {
1553 		list_del(&c->cn_persession);
1554 		free_conn(c);
1555 	}
1556 	nfsd4_probe_callback(clp);
1557 	spin_unlock(&clp->cl_lock);
1558 }
1559 
1560 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1561 {
1562 	struct nfsd4_conn *conn;
1563 
1564 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1565 	if (!conn)
1566 		return NULL;
1567 	svc_xprt_get(rqstp->rq_xprt);
1568 	conn->cn_xprt = rqstp->rq_xprt;
1569 	conn->cn_flags = flags;
1570 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1571 	return conn;
1572 }
1573 
1574 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1575 {
1576 	conn->cn_session = ses;
1577 	list_add(&conn->cn_persession, &ses->se_conns);
1578 }
1579 
1580 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1581 {
1582 	struct nfs4_client *clp = ses->se_client;
1583 
1584 	spin_lock(&clp->cl_lock);
1585 	__nfsd4_hash_conn(conn, ses);
1586 	spin_unlock(&clp->cl_lock);
1587 }
1588 
1589 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1590 {
1591 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1592 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1593 }
1594 
1595 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1596 {
1597 	int ret;
1598 
1599 	nfsd4_hash_conn(conn, ses);
1600 	ret = nfsd4_register_conn(conn);
1601 	if (ret)
1602 		/* oops; xprt is already down: */
1603 		nfsd4_conn_lost(&conn->cn_xpt_user);
1604 	/* We may have gained or lost a callback channel: */
1605 	nfsd4_probe_callback_sync(ses->se_client);
1606 }
1607 
1608 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1609 {
1610 	u32 dir = NFS4_CDFC4_FORE;
1611 
1612 	if (cses->flags & SESSION4_BACK_CHAN)
1613 		dir |= NFS4_CDFC4_BACK;
1614 	return alloc_conn(rqstp, dir);
1615 }
1616 
1617 /* must be called under client_lock */
1618 static void nfsd4_del_conns(struct nfsd4_session *s)
1619 {
1620 	struct nfs4_client *clp = s->se_client;
1621 	struct nfsd4_conn *c;
1622 
1623 	spin_lock(&clp->cl_lock);
1624 	while (!list_empty(&s->se_conns)) {
1625 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1626 		list_del_init(&c->cn_persession);
1627 		spin_unlock(&clp->cl_lock);
1628 
1629 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1630 		free_conn(c);
1631 
1632 		spin_lock(&clp->cl_lock);
1633 	}
1634 	spin_unlock(&clp->cl_lock);
1635 }
1636 
1637 static void __free_session(struct nfsd4_session *ses)
1638 {
1639 	free_session_slots(ses);
1640 	kfree(ses);
1641 }
1642 
1643 static void free_session(struct nfsd4_session *ses)
1644 {
1645 	nfsd4_del_conns(ses);
1646 	nfsd4_put_drc_mem(&ses->se_fchannel);
1647 	__free_session(ses);
1648 }
1649 
1650 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1651 {
1652 	int idx;
1653 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1654 
1655 	new->se_client = clp;
1656 	gen_sessionid(new);
1657 
1658 	INIT_LIST_HEAD(&new->se_conns);
1659 
1660 	new->se_cb_seq_nr = 1;
1661 	new->se_flags = cses->flags;
1662 	new->se_cb_prog = cses->callback_prog;
1663 	new->se_cb_sec = cses->cb_sec;
1664 	atomic_set(&new->se_ref, 0);
1665 	idx = hash_sessionid(&new->se_sessionid);
1666 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1667 	spin_lock(&clp->cl_lock);
1668 	list_add(&new->se_perclnt, &clp->cl_sessions);
1669 	spin_unlock(&clp->cl_lock);
1670 
1671 	{
1672 		struct sockaddr *sa = svc_addr(rqstp);
1673 		/*
1674 		 * This is a little silly; with sessions there's no real
1675 		 * use for the callback address.  Use the peer address
1676 		 * as a reasonable default for now, but consider fixing
1677 		 * the rpc client not to require an address in the
1678 		 * future:
1679 		 */
1680 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1681 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1682 	}
1683 }
1684 
1685 /* caller must hold client_lock */
1686 static struct nfsd4_session *
1687 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1688 {
1689 	struct nfsd4_session *elem;
1690 	int idx;
1691 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1692 
1693 	lockdep_assert_held(&nn->client_lock);
1694 
1695 	dump_sessionid(__func__, sessionid);
1696 	idx = hash_sessionid(sessionid);
1697 	/* Search in the appropriate list */
1698 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1699 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1700 			    NFS4_MAX_SESSIONID_LEN)) {
1701 			return elem;
1702 		}
1703 	}
1704 
1705 	dprintk("%s: session not found\n", __func__);
1706 	return NULL;
1707 }
1708 
1709 static struct nfsd4_session *
1710 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1711 		__be32 *ret)
1712 {
1713 	struct nfsd4_session *session;
1714 	__be32 status = nfserr_badsession;
1715 
1716 	session = __find_in_sessionid_hashtbl(sessionid, net);
1717 	if (!session)
1718 		goto out;
1719 	status = nfsd4_get_session_locked(session);
1720 	if (status)
1721 		session = NULL;
1722 out:
1723 	*ret = status;
1724 	return session;
1725 }
1726 
1727 /* caller must hold client_lock */
1728 static void
1729 unhash_session(struct nfsd4_session *ses)
1730 {
1731 	struct nfs4_client *clp = ses->se_client;
1732 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1733 
1734 	lockdep_assert_held(&nn->client_lock);
1735 
1736 	list_del(&ses->se_hash);
1737 	spin_lock(&ses->se_client->cl_lock);
1738 	list_del(&ses->se_perclnt);
1739 	spin_unlock(&ses->se_client->cl_lock);
1740 }
1741 
1742 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1743 static int
1744 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1745 {
1746 	/*
1747 	 * We're assuming the clid was not given out from a boot
1748 	 * precisely 2^32 (about 136 years) before this one.  That seems
1749 	 * a safe assumption:
1750 	 */
1751 	if (clid->cl_boot == (u32)nn->boot_time)
1752 		return 0;
1753 	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1754 		clid->cl_boot, clid->cl_id, nn->boot_time);
1755 	return 1;
1756 }
1757 
1758 /*
1759  * XXX Should we use a slab cache?
1760  * This type of memory management is somewhat inefficient, but we use it
1761  * anyway since SETCLIENTID is not a common operation.
1762  */
1763 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1764 {
1765 	struct nfs4_client *clp;
1766 	int i;
1767 
1768 	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1769 	if (clp == NULL)
1770 		return NULL;
1771 	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1772 	if (clp->cl_name.data == NULL)
1773 		goto err_no_name;
1774 	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1775 			OWNER_HASH_SIZE, GFP_KERNEL);
1776 	if (!clp->cl_ownerstr_hashtbl)
1777 		goto err_no_hashtbl;
1778 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1779 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1780 	clp->cl_name.len = name.len;
1781 	INIT_LIST_HEAD(&clp->cl_sessions);
1782 	idr_init(&clp->cl_stateids);
1783 	atomic_set(&clp->cl_refcount, 0);
1784 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1785 	INIT_LIST_HEAD(&clp->cl_idhash);
1786 	INIT_LIST_HEAD(&clp->cl_openowners);
1787 	INIT_LIST_HEAD(&clp->cl_delegations);
1788 	INIT_LIST_HEAD(&clp->cl_lru);
1789 	INIT_LIST_HEAD(&clp->cl_revoked);
1790 #ifdef CONFIG_NFSD_PNFS
1791 	INIT_LIST_HEAD(&clp->cl_lo_states);
1792 #endif
1793 	spin_lock_init(&clp->cl_lock);
1794 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1795 	return clp;
1796 err_no_hashtbl:
1797 	kfree(clp->cl_name.data);
1798 err_no_name:
1799 	kfree(clp);
1800 	return NULL;
1801 }
1802 
1803 static void
1804 free_client(struct nfs4_client *clp)
1805 {
1806 	while (!list_empty(&clp->cl_sessions)) {
1807 		struct nfsd4_session *ses;
1808 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1809 				se_perclnt);
1810 		list_del(&ses->se_perclnt);
1811 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
1812 		free_session(ses);
1813 	}
1814 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1815 	free_svc_cred(&clp->cl_cred);
1816 	kfree(clp->cl_ownerstr_hashtbl);
1817 	kfree(clp->cl_name.data);
1818 	idr_destroy(&clp->cl_stateids);
1819 	kfree(clp);
1820 }
1821 
1822 /* must be called under the client_lock */
1823 static void
1824 unhash_client_locked(struct nfs4_client *clp)
1825 {
1826 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1827 	struct nfsd4_session *ses;
1828 
1829 	lockdep_assert_held(&nn->client_lock);
1830 
1831 	/* Mark the client as expired! */
1832 	clp->cl_time = 0;
1833 	/* Make it invisible */
1834 	if (!list_empty(&clp->cl_idhash)) {
1835 		list_del_init(&clp->cl_idhash);
1836 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1837 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1838 		else
1839 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1840 	}
1841 	list_del_init(&clp->cl_lru);
1842 	spin_lock(&clp->cl_lock);
1843 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1844 		list_del_init(&ses->se_hash);
1845 	spin_unlock(&clp->cl_lock);
1846 }
1847 
1848 static void
1849 unhash_client(struct nfs4_client *clp)
1850 {
1851 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1852 
1853 	spin_lock(&nn->client_lock);
1854 	unhash_client_locked(clp);
1855 	spin_unlock(&nn->client_lock);
1856 }
1857 
1858 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1859 {
1860 	if (atomic_read(&clp->cl_refcount))
1861 		return nfserr_jukebox;
1862 	unhash_client_locked(clp);
1863 	return nfs_ok;
1864 }
1865 
1866 static void
1867 __destroy_client(struct nfs4_client *clp)
1868 {
1869 	struct nfs4_openowner *oo;
1870 	struct nfs4_delegation *dp;
1871 	struct list_head reaplist;
1872 
1873 	INIT_LIST_HEAD(&reaplist);
1874 	spin_lock(&state_lock);
1875 	while (!list_empty(&clp->cl_delegations)) {
1876 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1877 		WARN_ON(!unhash_delegation_locked(dp));
1878 		list_add(&dp->dl_recall_lru, &reaplist);
1879 	}
1880 	spin_unlock(&state_lock);
1881 	while (!list_empty(&reaplist)) {
1882 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1883 		list_del_init(&dp->dl_recall_lru);
1884 		put_clnt_odstate(dp->dl_clnt_odstate);
1885 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1886 		nfs4_put_stid(&dp->dl_stid);
1887 	}
1888 	while (!list_empty(&clp->cl_revoked)) {
1889 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1890 		list_del_init(&dp->dl_recall_lru);
1891 		nfs4_put_stid(&dp->dl_stid);
1892 	}
1893 	while (!list_empty(&clp->cl_openowners)) {
1894 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1895 		nfs4_get_stateowner(&oo->oo_owner);
1896 		release_openowner(oo);
1897 	}
1898 	nfsd4_return_all_client_layouts(clp);
1899 	nfsd4_shutdown_callback(clp);
1900 	if (clp->cl_cb_conn.cb_xprt)
1901 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1902 	free_client(clp);
1903 }
1904 
1905 static void
1906 destroy_client(struct nfs4_client *clp)
1907 {
1908 	unhash_client(clp);
1909 	__destroy_client(clp);
1910 }
1911 
1912 static void expire_client(struct nfs4_client *clp)
1913 {
1914 	unhash_client(clp);
1915 	nfsd4_client_record_remove(clp);
1916 	__destroy_client(clp);
1917 }
1918 
1919 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1920 {
1921 	memcpy(target->cl_verifier.data, source->data,
1922 			sizeof(target->cl_verifier.data));
1923 }
1924 
1925 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1926 {
1927 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1928 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1929 }
1930 
1931 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1932 {
1933 	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
1934 	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
1935 								GFP_KERNEL);
1936 	if ((source->cr_principal && ! target->cr_principal) ||
1937 	    (source->cr_raw_principal && ! target->cr_raw_principal))
1938 		return -ENOMEM;
1939 
1940 	target->cr_flavor = source->cr_flavor;
1941 	target->cr_uid = source->cr_uid;
1942 	target->cr_gid = source->cr_gid;
1943 	target->cr_group_info = source->cr_group_info;
1944 	get_group_info(target->cr_group_info);
1945 	target->cr_gss_mech = source->cr_gss_mech;
1946 	if (source->cr_gss_mech)
1947 		gss_mech_get(source->cr_gss_mech);
1948 	return 0;
1949 }
1950 
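/*
 * Order two netobjs by length first, then by contents; this is the total
 * ordering used to keep the client-name rbtrees sorted.
 */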
1951 static int
1952 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1953 {
1954 	if (o1->len < o2->len)
1955 		return -1;
1956 	if (o1->len > o2->len)
1957 		return 1;
1958 	return memcmp(o1->data, o2->data, o1->len);
1959 }
1960 
1961 static int same_name(const char *n1, const char *n2)
1962 {
1963 	return 0 == memcmp(n1, n2, HEXDIR_LEN);
1964 }
1965 
1966 static int
1967 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1968 {
1969 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1970 }
1971 
1972 static int
1973 same_clid(clientid_t *cl1, clientid_t *cl2)
1974 {
1975 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1976 }
1977 
1978 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1979 {
1980 	int i;
1981 
1982 	if (g1->ngroups != g2->ngroups)
1983 		return false;
1984 	for (i=0; i<g1->ngroups; i++)
1985 		if (!gid_eq(g1->gid[i], g2->gid[i]))
1986 			return false;
1987 	return true;
1988 }
1989 
1990 /*
1991  * RFC 3530 language requires clid_inuse be returned when the
1992  * "principal" associated with a request differs from that previously
1993  * used.  We use the uid, gids, and gss principal string as our best
1994  * approximation.  We also don't want to allow non-gss use of a client
1995  * established using gss: in theory cr_principal should catch that
1996  * change, but in practice cr_principal can be null even in the gss case
1997  * since gssd doesn't always pass down a principal string.
1998  */
1999 static bool is_gss_cred(struct svc_cred *cr)
2000 {
2001 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
2002 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2003 }
2004 
2005 
2006 static bool
2007 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2008 {
2009 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2010 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2011 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2012 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2013 		return false;
2014 	if (cr1->cr_principal == cr2->cr_principal)
2015 		return true;
2016 	if (!cr1->cr_principal || !cr2->cr_principal)
2017 		return false;
2018 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2019 }
2020 
2021 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2022 {
2023 	struct svc_cred *cr = &rqstp->rq_cred;
2024 	u32 service;
2025 
2026 	if (!cr->cr_gss_mech)
2027 		return false;
2028 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2029 	return service == RPC_GSS_SVC_INTEGRITY ||
2030 	       service == RPC_GSS_SVC_PRIVACY;
2031 }
2032 
2033 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2034 {
2035 	struct svc_cred *cr = &rqstp->rq_cred;
2036 
2037 	if (!cl->cl_mach_cred)
2038 		return true;
2039 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2040 		return false;
2041 	if (!svc_rqst_integrity_protected(rqstp))
2042 		return false;
2043 	if (cl->cl_cred.cr_raw_principal)
2044 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2045 						cr->cr_raw_principal);
2046 	if (!cr->cr_principal)
2047 		return false;
2048 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2049 }
2050 
2051 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2052 {
2053 	__be32 verf[2];
2054 
2055 	/*
2056 	 * This is opaque to the client, so no need to byte-swap. Use
2057 	 * __force to keep sparse happy
2058 	 */
2059 	verf[0] = (__force __be32)get_seconds();
2060 	verf[1] = (__force __be32)nn->clverifier_counter++;
2061 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2062 }
2063 
2064 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2065 {
2066 	clp->cl_clientid.cl_boot = nn->boot_time;
2067 	clp->cl_clientid.cl_id = nn->clientid_counter++;
2068 	gen_confirm(clp, nn);
2069 }
2070 
2071 static struct nfs4_stid *
2072 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2073 {
2074 	struct nfs4_stid *ret;
2075 
2076 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2077 	if (!ret || !ret->sc_type)
2078 		return NULL;
2079 	return ret;
2080 }
2081 
2082 static struct nfs4_stid *
2083 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2084 {
2085 	struct nfs4_stid *s;
2086 
2087 	spin_lock(&cl->cl_lock);
2088 	s = find_stateid_locked(cl, t);
2089 	if (s != NULL) {
2090 		if (typemask & s->sc_type)
2091 			refcount_inc(&s->sc_count);
2092 		else
2093 			s = NULL;
2094 	}
2095 	spin_unlock(&cl->cl_lock);
2096 	return s;
2097 }
2098 
2099 static struct nfs4_client *create_client(struct xdr_netobj name,
2100 		struct svc_rqst *rqstp, nfs4_verifier *verf)
2101 {
2102 	struct nfs4_client *clp;
2103 	struct sockaddr *sa = svc_addr(rqstp);
2104 	int ret;
2105 	struct net *net = SVC_NET(rqstp);
2106 
2107 	clp = alloc_client(name);
2108 	if (clp == NULL)
2109 		return NULL;
2110 
2111 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2112 	if (ret) {
2113 		free_client(clp);
2114 		return NULL;
2115 	}
2116 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2117 	clp->cl_time = get_seconds();
2118 	clear_bit(0, &clp->cl_cb_slot_busy);
2119 	copy_verf(clp, verf);
2120 	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2121 	clp->cl_cb_session = NULL;
2122 	clp->net = net;
2123 	return clp;
2124 }
2125 
2126 static void
2127 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2128 {
2129 	struct rb_node **new = &(root->rb_node), *parent = NULL;
2130 	struct nfs4_client *clp;
2131 
2132 	while (*new) {
2133 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2134 		parent = *new;
2135 
2136 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2137 			new = &((*new)->rb_left);
2138 		else
2139 			new = &((*new)->rb_right);
2140 	}
2141 
2142 	rb_link_node(&new_clp->cl_namenode, parent, new);
2143 	rb_insert_color(&new_clp->cl_namenode, root);
2144 }
2145 
2146 static struct nfs4_client *
2147 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2148 {
2149 	int cmp;
2150 	struct rb_node *node = root->rb_node;
2151 	struct nfs4_client *clp;
2152 
2153 	while (node) {
2154 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2155 		cmp = compare_blob(&clp->cl_name, name);
2156 		if (cmp > 0)
2157 			node = node->rb_left;
2158 		else if (cmp < 0)
2159 			node = node->rb_right;
2160 		else
2161 			return clp;
2162 	}
2163 	return NULL;
2164 }
2165 
2166 static void
2167 add_to_unconfirmed(struct nfs4_client *clp)
2168 {
2169 	unsigned int idhashval;
2170 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2171 
2172 	lockdep_assert_held(&nn->client_lock);
2173 
2174 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2175 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2176 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2177 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2178 	renew_client_locked(clp);
2179 }
2180 
2181 static void
2182 move_to_confirmed(struct nfs4_client *clp)
2183 {
2184 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2185 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2186 
2187 	lockdep_assert_held(&nn->client_lock);
2188 
2189 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2190 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2191 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2192 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
2193 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2194 	renew_client_locked(clp);
2195 }
2196 
2197 static struct nfs4_client *
2198 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2199 {
2200 	struct nfs4_client *clp;
2201 	unsigned int idhashval = clientid_hashval(clid->cl_id);
2202 
2203 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2204 		if (same_clid(&clp->cl_clientid, clid)) {
2205 			if ((bool)clp->cl_minorversion != sessions)
2206 				return NULL;
2207 			renew_client_locked(clp);
2208 			return clp;
2209 		}
2210 	}
2211 	return NULL;
2212 }
2213 
2214 static struct nfs4_client *
2215 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2216 {
2217 	struct list_head *tbl = nn->conf_id_hashtbl;
2218 
2219 	lockdep_assert_held(&nn->client_lock);
2220 	return find_client_in_id_table(tbl, clid, sessions);
2221 }
2222 
2223 static struct nfs4_client *
2224 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2225 {
2226 	struct list_head *tbl = nn->unconf_id_hashtbl;
2227 
2228 	lockdep_assert_held(&nn->client_lock);
2229 	return find_client_in_id_table(tbl, clid, sessions);
2230 }
2231 
2232 static bool clp_used_exchangeid(struct nfs4_client *clp)
2233 {
2234 	return clp->cl_exchange_flags != 0;
2235 }
2236 
2237 static struct nfs4_client *
2238 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2239 {
2240 	lockdep_assert_held(&nn->client_lock);
2241 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
2242 }
2243 
2244 static struct nfs4_client *
2245 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2246 {
2247 	lockdep_assert_held(&nn->client_lock);
2248 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2249 }
2250 
2251 static void
2252 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2253 {
2254 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2255 	struct sockaddr	*sa = svc_addr(rqstp);
2256 	u32 scopeid = rpc_get_scope_id(sa);
2257 	unsigned short expected_family;
2258 
2259 	/* Currently, we only support tcp and tcp6 for the callback channel */
2260 	if (se->se_callback_netid_len == 3 &&
2261 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2262 		expected_family = AF_INET;
2263 	else if (se->se_callback_netid_len == 4 &&
2264 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2265 		expected_family = AF_INET6;
2266 	else
2267 		goto out_err;
2268 
2269 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2270 					    se->se_callback_addr_len,
2271 					    (struct sockaddr *)&conn->cb_addr,
2272 					    sizeof(conn->cb_addr));
2273 
2274 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2275 		goto out_err;
2276 
2277 	if (conn->cb_addr.ss_family == AF_INET6)
2278 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2279 
2280 	conn->cb_prog = se->se_callback_prog;
2281 	conn->cb_ident = se->se_callback_ident;
2282 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2283 	return;
2284 out_err:
2285 	conn->cb_addr.ss_family = AF_UNSPEC;
2286 	conn->cb_addrlen = 0;
2287 	dprintk("NFSD: this client (clientid %08x/%08x) "
2288 		"will not receive delegations\n",
2289 		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2290 
2291 	return;
2292 }
2293 
2294 /*
2295  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
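 *
 * Only the encoded data following the SEQUENCE op (starting at
 * resp->cstate.data_offset) is saved; on replay the SEQUENCE op itself is
 * re-encoded from the slot values and the saved data is appended to it.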
2296  */
2297 static void
2298 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2299 {
2300 	struct xdr_buf *buf = resp->xdr.buf;
2301 	struct nfsd4_slot *slot = resp->cstate.slot;
2302 	unsigned int base;
2303 
2304 	dprintk("--> %s slot %p\n", __func__, slot);
2305 
2306 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2307 	slot->sl_opcnt = resp->opcnt;
2308 	slot->sl_status = resp->cstate.status;
2309 	free_svc_cred(&slot->sl_cred);
2310 	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2311 
2312 	if (!nfsd4_cache_this(resp)) {
2313 		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2314 		return;
2315 	}
2316 	slot->sl_flags |= NFSD4_SLOT_CACHED;
2317 
2318 	base = resp->cstate.data_offset;
2319 	slot->sl_datalen = buf->len - base;
2320 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2321 		WARN(1, "%s: sessions DRC could not cache compound\n",
2322 		     __func__);
2323 	return;
2324 }
2325 
2326 /*
2327  * Encode the replay sequence operation from the slot values.
2328  * If cachethis is FALSE, encode the uncached rep error on the next
2329  * operation, which sets resp->p and increments resp->opcnt for
2330  * nfs4svc_encode_compoundres.
2331  *
2332  */
2333 static __be32
2334 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2335 			  struct nfsd4_compoundres *resp)
2336 {
2337 	struct nfsd4_op *op;
2338 	struct nfsd4_slot *slot = resp->cstate.slot;
2339 
2340 	/* Encode the replayed sequence operation */
2341 	op = &args->ops[resp->opcnt - 1];
2342 	nfsd4_encode_operation(resp, op);
2343 
2344 	if (slot->sl_flags & NFSD4_SLOT_CACHED)
2345 		return op->status;
2346 	if (args->opcnt == 1) {
2347 		/*
2348 		 * The original operation wasn't a solo sequence--we
2349 		 * always cache those--so this retry must not match the
2350 		 * original:
2351 		 */
2352 		op->status = nfserr_seq_false_retry;
2353 	} else {
2354 		op = &args->ops[resp->opcnt++];
2355 		op->status = nfserr_retry_uncached_rep;
2356 		nfsd4_encode_operation(resp, op);
2357 	}
2358 	return op->status;
2359 }
2360 
2361 /*
2362  * The sequence operation is not cached because we can use the slot and
2363  * session values.
2364  */
2365 static __be32
2366 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2367 			 struct nfsd4_sequence *seq)
2368 {
2369 	struct nfsd4_slot *slot = resp->cstate.slot;
2370 	struct xdr_stream *xdr = &resp->xdr;
2371 	__be32 *p;
2372 	__be32 status;
2373 
2374 	dprintk("--> %s slot %p\n", __func__, slot);
2375 
2376 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2377 	if (status)
2378 		return status;
2379 
2380 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2381 	if (!p) {
2382 		WARN_ON_ONCE(1);
2383 		return nfserr_serverfault;
2384 	}
2385 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2386 	xdr_commit_encode(xdr);
2387 
2388 	resp->opcnt = slot->sl_opcnt;
2389 	return slot->sl_status;
2390 }
2391 
2392 /*
2393  * Set the exchange_id flags returned by the server.
2394  */
2395 static void
2396 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2397 {
2398 #ifdef CONFIG_NFSD_PNFS
2399 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2400 #else
2401 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2402 #endif
2403 
2404 	/* Referrals are supported; migration is not. */
2405 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2406 
2407 	/* set the wire flags to return to the client. */
2408 	clid->flags = new->cl_exchange_flags;
2409 }
2410 
2411 static bool client_has_openowners(struct nfs4_client *clp)
2412 {
2413 	struct nfs4_openowner *oo;
2414 
2415 	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2416 		if (!list_empty(&oo->oo_owner.so_stateids))
2417 			return true;
2418 	}
2419 	return false;
2420 }
2421 
2422 static bool client_has_state(struct nfs4_client *clp)
2423 {
2424 	return client_has_openowners(clp)
2425 #ifdef CONFIG_NFSD_PNFS
2426 		|| !list_empty(&clp->cl_lo_states)
2427 #endif
2428 		|| !list_empty(&clp->cl_delegations)
2429 		|| !list_empty(&clp->cl_sessions);
2430 }
2431 
2432 __be32
2433 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2434 		union nfsd4_op_u *u)
2435 {
2436 	struct nfsd4_exchange_id *exid = &u->exchange_id;
2437 	struct nfs4_client *conf, *new;
2438 	struct nfs4_client *unconf = NULL;
2439 	__be32 status;
2440 	char			addr_str[INET6_ADDRSTRLEN];
2441 	nfs4_verifier		verf = exid->verifier;
2442 	struct sockaddr		*sa = svc_addr(rqstp);
2443 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2444 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2445 
2446 	rpc_ntop(sa, addr_str, sizeof(addr_str));
2447 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2448 		"ip_addr=%s flags %x, spa_how %d\n",
2449 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
2450 		addr_str, exid->flags, exid->spa_how);
2451 
2452 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2453 		return nfserr_inval;
2454 
2455 	new = create_client(exid->clname, rqstp, &verf);
2456 	if (new == NULL)
2457 		return nfserr_jukebox;
2458 
2459 	switch (exid->spa_how) {
2460 	case SP4_MACH_CRED:
2461 		exid->spo_must_enforce[0] = 0;
2462 		exid->spo_must_enforce[1] = (
2463 			1 << (OP_BIND_CONN_TO_SESSION - 32) |
2464 			1 << (OP_EXCHANGE_ID - 32) |
2465 			1 << (OP_CREATE_SESSION - 32) |
2466 			1 << (OP_DESTROY_SESSION - 32) |
2467 			1 << (OP_DESTROY_CLIENTID - 32));
2468 
2469 		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2470 					1 << (OP_OPEN_DOWNGRADE) |
2471 					1 << (OP_LOCKU) |
2472 					1 << (OP_DELEGRETURN));
2473 
2474 		exid->spo_must_allow[1] &= (
2475 					1 << (OP_TEST_STATEID - 32) |
2476 					1 << (OP_FREE_STATEID - 32));
2477 		if (!svc_rqst_integrity_protected(rqstp)) {
2478 			status = nfserr_inval;
2479 			goto out_nolock;
2480 		}
2481 		/*
2482 		 * Sometimes userspace doesn't give us a principal.
2483 		 * Which is a bug, really.  Anyway, we can't enforce
2484 		 * MACH_CRED in that case, better to give up now:
2485 		 */
2486 		if (!new->cl_cred.cr_principal &&
2487 					!new->cl_cred.cr_raw_principal) {
2488 			status = nfserr_serverfault;
2489 			goto out_nolock;
2490 		}
2491 		new->cl_mach_cred = true;
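		/* fall through */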
2492 	case SP4_NONE:
2493 		break;
2494 	default:				/* checked by xdr code */
2495 		WARN_ON_ONCE(1);
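		/* fall through */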
2496 	case SP4_SSV:
2497 		status = nfserr_encr_alg_unsupp;
2498 		goto out_nolock;
2499 	}
2500 
2501 	/* Cases below refer to rfc 5661 section 18.35.4: */
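	/* (the "case N" comments below are the numbered cases from that section) */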
2502 	spin_lock(&nn->client_lock);
2503 	conf = find_confirmed_client_by_name(&exid->clname, nn);
2504 	if (conf) {
2505 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2506 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2507 
2508 		if (update) {
2509 			if (!clp_used_exchangeid(conf)) { /* buggy client */
2510 				status = nfserr_inval;
2511 				goto out;
2512 			}
2513 			if (!nfsd4_mach_creds_match(conf, rqstp)) {
2514 				status = nfserr_wrong_cred;
2515 				goto out;
2516 			}
2517 			if (!creds_match) { /* case 9 */
2518 				status = nfserr_perm;
2519 				goto out;
2520 			}
2521 			if (!verfs_match) { /* case 8 */
2522 				status = nfserr_not_same;
2523 				goto out;
2524 			}
2525 			/* case 6 */
2526 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2527 			goto out_copy;
2528 		}
2529 		if (!creds_match) { /* case 3 */
2530 			if (client_has_state(conf)) {
2531 				status = nfserr_clid_inuse;
2532 				goto out;
2533 			}
2534 			goto out_new;
2535 		}
2536 		if (verfs_match) { /* case 2 */
2537 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2538 			goto out_copy;
2539 		}
2540 		/* case 5, client reboot */
2541 		conf = NULL;
2542 		goto out_new;
2543 	}
2544 
2545 	if (update) { /* case 7 */
2546 		status = nfserr_noent;
2547 		goto out;
2548 	}
2549 
2550 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
2551 	if (unconf) /* case 4, possible retry or client restart */
2552 		unhash_client_locked(unconf);
2553 
2554 	/* case 1 (normal case) */
2555 out_new:
2556 	if (conf) {
2557 		status = mark_client_expired_locked(conf);
2558 		if (status)
2559 			goto out;
2560 	}
2561 	new->cl_minorversion = cstate->minorversion;
2562 	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2563 	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2564 
2565 	gen_clid(new, nn);
2566 	add_to_unconfirmed(new);
2567 	swap(new, conf);
2568 out_copy:
2569 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2570 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
2571 
2572 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2573 	nfsd4_set_ex_flags(conf, exid);
2574 
2575 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2576 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2577 	status = nfs_ok;
2578 
2579 out:
2580 	spin_unlock(&nn->client_lock);
2581 out_nolock:
2582 	if (new)
2583 		expire_client(new);
2584 	if (unconf)
2585 		expire_client(unconf);
2586 	return status;
2587 }
2588 
2589 static __be32
2590 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2591 {
2592 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2593 		slot_seqid);
2594 
2595 	/* The slot is in use, and no response has been sent. */
2596 	if (slot_inuse) {
2597 		if (seqid == slot_seqid)
2598 			return nfserr_jukebox;
2599 		else
2600 			return nfserr_seq_misordered;
2601 	}
2602 	/* Note unsigned 32-bit arithmetic handles wraparound: */
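	/* (e.g. slot_seqid 0xffffffff followed by seqid 0 is accepted as "slot_seqid + 1") */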
2603 	if (likely(seqid == slot_seqid + 1))
2604 		return nfs_ok;
2605 	if (seqid == slot_seqid)
2606 		return nfserr_replay_cache;
2607 	return nfserr_seq_misordered;
2608 }
2609 
2610 /*
2611  * Cache the create session result into the create session single DRC
2612  * slot cache by saving the whole nfsd4_create_session structure. sl_seqid has already been set.
2613  * Do this for solo or embedded create session operations.
2614  */
2615 static void
2616 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2617 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
2618 {
2619 	slot->sl_status = nfserr;
2620 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2621 }
2622 
2623 static __be32
2624 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2625 			    struct nfsd4_clid_slot *slot)
2626 {
2627 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2628 	return slot->sl_status;
2629 }
2630 
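/*
 * Minimum XDR size of a compound containing just a SEQUENCE op; used below
 * to reject fore channel maxreq_sz/maxresp_sz values too small to ever carry
 * a valid request or reply.
 */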
2631 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
2632 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2633 			1 +	/* MIN tag is length with zero, only length */ \
2634 			3 +	/* version, opcount, opcode */ \
2635 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2636 				/* seqid, slotID, slotID, cache */ \
2637 			4 ) * sizeof(__be32))
2638 
2639 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2640 			2 +	/* verifier: AUTH_NULL, length 0 */\
2641 			1 +	/* status */ \
2642 			1 +	/* MIN tag is length with zero, only length */ \
2643 			3 +	/* opcount, opcode, opstatus*/ \
2644 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2645 				/* seqid, slotID, slotID, slotID, status */ \
2646 			5 ) * sizeof(__be32))
2647 
2648 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2649 {
2650 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2651 
2652 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2653 		return nfserr_toosmall;
2654 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2655 		return nfserr_toosmall;
2656 	ca->headerpadsz = 0;
2657 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2658 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2659 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2660 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2661 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2662 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2663 	/*
2664 	 * Note that decreasing the slot size below the client's request may
2665 	 * make it difficult for the client to function correctly, whereas
2666 	 * decreasing the number of slots will (just?) affect
2667 	 * performance.  When short on memory we therefore prefer to
2668 	 * decrease the number of slots instead of their size.  Clients that
2669 	 * request larger slots than they need will get poor results:
2670 	 */
2671 	ca->maxreqs = nfsd4_get_drc_mem(ca);
2672 	if (!ca->maxreqs)
2673 		return nfserr_jukebox;
2674 
2675 	return nfs_ok;
2676 }
2677 
2678 /*
2679  * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2680  * These are based on similar macros in linux/sunrpc/msg_prot.h.
2681  */
2682 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
2683 	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2684 
2685 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2686 	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2687 
2688 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
2689 				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2690 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
2691 				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2692 				 sizeof(__be32))
2693 
2694 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2695 {
2696 	ca->headerpadsz = 0;
2697 
2698 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2699 		return nfserr_toosmall;
2700 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2701 		return nfserr_toosmall;
2702 	ca->maxresp_cached = 0;
2703 	if (ca->maxops < 2)
2704 		return nfserr_toosmall;
2705 
2706 	return nfs_ok;
2707 }
2708 
2709 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2710 {
2711 	switch (cbs->flavor) {
2712 	case RPC_AUTH_NULL:
2713 	case RPC_AUTH_UNIX:
2714 		return nfs_ok;
2715 	default:
2716 		/*
2717 		 * GSS case: the spec doesn't allow us to return this
2718 		 * error.  But it also doesn't allow us not to support
2719 		 * GSS.
2720 		 * I'd rather this fail hard than return some error the
2721 		 * client might think it can already handle:
2722 		 */
2723 		return nfserr_encr_alg_unsupp;
2724 	}
2725 }
2726 
2727 __be32
2728 nfsd4_create_session(struct svc_rqst *rqstp,
2729 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
2730 {
2731 	struct nfsd4_create_session *cr_ses = &u->create_session;
2732 	struct sockaddr *sa = svc_addr(rqstp);
2733 	struct nfs4_client *conf, *unconf;
2734 	struct nfs4_client *old = NULL;
2735 	struct nfsd4_session *new;
2736 	struct nfsd4_conn *conn;
2737 	struct nfsd4_clid_slot *cs_slot = NULL;
2738 	__be32 status = 0;
2739 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2740 
2741 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2742 		return nfserr_inval;
2743 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2744 	if (status)
2745 		return status;
2746 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2747 	if (status)
2748 		return status;
2749 	status = check_backchannel_attrs(&cr_ses->back_channel);
2750 	if (status)
2751 		goto out_release_drc_mem;
2752 	status = nfserr_jukebox;
2753 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2754 	if (!new)
2755 		goto out_release_drc_mem;
2756 	conn = alloc_conn_from_crses(rqstp, cr_ses);
2757 	if (!conn)
2758 		goto out_free_session;
2759 
2760 	spin_lock(&nn->client_lock);
2761 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2762 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2763 	WARN_ON_ONCE(conf && unconf);
2764 
2765 	if (conf) {
2766 		status = nfserr_wrong_cred;
2767 		if (!nfsd4_mach_creds_match(conf, rqstp))
2768 			goto out_free_conn;
2769 		cs_slot = &conf->cl_cs_slot;
2770 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2771 		if (status) {
2772 			if (status == nfserr_replay_cache)
2773 				status = nfsd4_replay_create_session(cr_ses, cs_slot);
2774 			goto out_free_conn;
2775 		}
2776 	} else if (unconf) {
2777 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2778 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2779 			status = nfserr_clid_inuse;
2780 			goto out_free_conn;
2781 		}
2782 		status = nfserr_wrong_cred;
2783 		if (!nfsd4_mach_creds_match(unconf, rqstp))
2784 			goto out_free_conn;
2785 		cs_slot = &unconf->cl_cs_slot;
2786 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2787 		if (status) {
2788 			/* an unconfirmed replay returns misordered */
2789 			status = nfserr_seq_misordered;
2790 			goto out_free_conn;
2791 		}
2792 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2793 		if (old) {
2794 			status = mark_client_expired_locked(old);
2795 			if (status) {
2796 				old = NULL;
2797 				goto out_free_conn;
2798 			}
2799 		}
2800 		move_to_confirmed(unconf);
2801 		conf = unconf;
2802 	} else {
2803 		status = nfserr_stale_clientid;
2804 		goto out_free_conn;
2805 	}
2806 	status = nfs_ok;
2807 	/* Persistent sessions are not supported */
2808 	cr_ses->flags &= ~SESSION4_PERSIST;
2809 	/* Upshifting from TCP to RDMA is not supported */
2810 	cr_ses->flags &= ~SESSION4_RDMA;
2811 
2812 	init_session(rqstp, new, conf, cr_ses);
2813 	nfsd4_get_session_locked(new);
2814 
2815 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2816 	       NFS4_MAX_SESSIONID_LEN);
2817 	cs_slot->sl_seqid++;
2818 	cr_ses->seqid = cs_slot->sl_seqid;
2819 
2820 	/* cache solo and embedded create sessions under the client_lock */
2821 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
2822 	spin_unlock(&nn->client_lock);
2823 	/* init connection and backchannel */
2824 	nfsd4_init_conn(rqstp, conn, new);
2825 	nfsd4_put_session(new);
2826 	if (old)
2827 		expire_client(old);
2828 	return status;
2829 out_free_conn:
2830 	spin_unlock(&nn->client_lock);
2831 	free_conn(conn);
2832 	if (old)
2833 		expire_client(old);
2834 out_free_session:
2835 	__free_session(new);
2836 out_release_drc_mem:
2837 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
2838 	return status;
2839 }
2840 
2841 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2842 {
2843 	switch (*dir) {
2844 	case NFS4_CDFC4_FORE:
2845 	case NFS4_CDFC4_BACK:
2846 		return nfs_ok;
2847 	case NFS4_CDFC4_FORE_OR_BOTH:
2848 	case NFS4_CDFC4_BACK_OR_BOTH:
2849 		*dir = NFS4_CDFC4_BOTH;
2850 		return nfs_ok;
2851 	}
2852 	return nfserr_inval;
2853 }
2854 
2855 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2856 		struct nfsd4_compound_state *cstate,
2857 		union nfsd4_op_u *u)
2858 {
2859 	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
2860 	struct nfsd4_session *session = cstate->session;
2861 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2862 	__be32 status;
2863 
2864 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2865 	if (status)
2866 		return status;
2867 	spin_lock(&nn->client_lock);
2868 	session->se_cb_prog = bc->bc_cb_program;
2869 	session->se_cb_sec = bc->bc_cb_sec;
2870 	spin_unlock(&nn->client_lock);
2871 
2872 	nfsd4_probe_callback(session->se_client);
2873 
2874 	return nfs_ok;
2875 }
2876 
2877 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2878 		     struct nfsd4_compound_state *cstate,
2879 		     union nfsd4_op_u *u)
2880 {
2881 	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
2882 	__be32 status;
2883 	struct nfsd4_conn *conn;
2884 	struct nfsd4_session *session;
2885 	struct net *net = SVC_NET(rqstp);
2886 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2887 
2888 	if (!nfsd4_last_compound_op(rqstp))
2889 		return nfserr_not_only_op;
2890 	spin_lock(&nn->client_lock);
2891 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2892 	spin_unlock(&nn->client_lock);
2893 	if (!session)
2894 		goto out_no_session;
2895 	status = nfserr_wrong_cred;
2896 	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2897 		goto out;
2898 	status = nfsd4_map_bcts_dir(&bcts->dir);
2899 	if (status)
2900 		goto out;
2901 	conn = alloc_conn(rqstp, bcts->dir);
2902 	status = nfserr_jukebox;
2903 	if (!conn)
2904 		goto out;
2905 	nfsd4_init_conn(rqstp, conn, session);
2906 	status = nfs_ok;
2907 out:
2908 	nfsd4_put_session(session);
2909 out_no_session:
2910 	return status;
2911 }
2912 
2913 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2914 {
2915 	if (!session)
2916 		return false;
2917 	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2918 }
2919 
2920 __be32
2921 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2922 		union nfsd4_op_u *u)
2923 {
2924 	struct nfsd4_destroy_session *sessionid = &u->destroy_session;
2925 	struct nfsd4_session *ses;
2926 	__be32 status;
2927 	int ref_held_by_me = 0;
2928 	struct net *net = SVC_NET(r);
2929 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2930 
2931 	status = nfserr_not_only_op;
2932 	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2933 		if (!nfsd4_last_compound_op(r))
2934 			goto out;
2935 		ref_held_by_me++;
2936 	}
2937 	dump_sessionid(__func__, &sessionid->sessionid);
2938 	spin_lock(&nn->client_lock);
2939 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2940 	if (!ses)
2941 		goto out_client_lock;
2942 	status = nfserr_wrong_cred;
2943 	if (!nfsd4_mach_creds_match(ses->se_client, r))
2944 		goto out_put_session;
2945 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2946 	if (status)
2947 		goto out_put_session;
2948 	unhash_session(ses);
2949 	spin_unlock(&nn->client_lock);
2950 
2951 	nfsd4_probe_callback_sync(ses->se_client);
2952 
2953 	spin_lock(&nn->client_lock);
2954 	status = nfs_ok;
2955 out_put_session:
2956 	nfsd4_put_session_locked(ses);
2957 out_client_lock:
2958 	spin_unlock(&nn->client_lock);
2959 out:
2960 	return status;
2961 }
2962 
2963 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2964 {
2965 	struct nfsd4_conn *c;
2966 
2967 	list_for_each_entry(c, &s->se_conns, cn_persession) {
2968 		if (c->cn_xprt == xpt) {
2969 			return c;
2970 		}
2971 	}
2972 	return NULL;
2973 }
2974 
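/*
 * Implicitly bind the connection this SEQUENCE arrived on to the session,
 * unless it is already bound.  Clients that negotiated SP4_MACH_CRED must
 * bind connections explicitly with BIND_CONN_TO_SESSION, so for them an
 * unknown connection is an error.
 */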
2975 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2976 {
2977 	struct nfs4_client *clp = ses->se_client;
2978 	struct nfsd4_conn *c;
2979 	__be32 status = nfs_ok;
2980 	int ret;
2981 
2982 	spin_lock(&clp->cl_lock);
2983 	c = __nfsd4_find_conn(new->cn_xprt, ses);
2984 	if (c)
2985 		goto out_free;
2986 	status = nfserr_conn_not_bound_to_session;
2987 	if (clp->cl_mach_cred)
2988 		goto out_free;
2989 	__nfsd4_hash_conn(new, ses);
2990 	spin_unlock(&clp->cl_lock);
2991 	ret = nfsd4_register_conn(new);
2992 	if (ret)
2993 		/* oops; xprt is already down: */
2994 		nfsd4_conn_lost(&new->cn_xpt_user);
2995 	return nfs_ok;
2996 out_free:
2997 	spin_unlock(&clp->cl_lock);
2998 	free_conn(new);
2999 	return status;
3000 }
3001 
3002 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3003 {
3004 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
3005 
3006 	return args->opcnt > session->se_fchannel.maxops;
3007 }
3008 
3009 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3010 				  struct nfsd4_session *session)
3011 {
3012 	struct xdr_buf *xb = &rqstp->rq_arg;
3013 
3014 	return xb->len > session->se_fchannel.maxreq_sz;
3015 }
3016 
3017 static bool replay_matches_cache(struct svc_rqst *rqstp,
3018 		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3019 {
3020 	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3021 
3022 	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3023 	    (bool)seq->cachethis)
3024 		return false;
3025 	/*
3026 	 * If there's an error then the reply can have fewer ops than
3027 	 * the call.  But if we cached a reply with *more* ops than the
3028 	 * call you're sending us now, then this new call is clearly not
3029 	 * really a replay of the old one:
3030 	 */
3031 	if (slot->sl_opcnt < argp->opcnt)
3032 		return false;
3033 	/* This is the only check explicitly called for by the spec: */
3034 	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3035 		return false;
3036 	/*
3037 	 * There may be more comparisons we could actually do, but the
3038 	 * spec doesn't require us to catch every case where the calls
3039 	 * don't match (that would require caching the call as well as
3040 	 * the reply), so we don't bother.
3041 	 */
3042 	return true;
3043 }
3044 
3045 __be32
3046 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3047 		union nfsd4_op_u *u)
3048 {
3049 	struct nfsd4_sequence *seq = &u->sequence;
3050 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3051 	struct xdr_stream *xdr = &resp->xdr;
3052 	struct nfsd4_session *session;
3053 	struct nfs4_client *clp;
3054 	struct nfsd4_slot *slot;
3055 	struct nfsd4_conn *conn;
3056 	__be32 status;
3057 	int buflen;
3058 	struct net *net = SVC_NET(rqstp);
3059 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3060 
3061 	if (resp->opcnt != 1)
3062 		return nfserr_sequence_pos;
3063 
3064 	/*
3065 	 * Will be either used or freed by nfsd4_sequence_check_conn
3066 	 * below.
3067 	 */
3068 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3069 	if (!conn)
3070 		return nfserr_jukebox;
3071 
3072 	spin_lock(&nn->client_lock);
3073 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3074 	if (!session)
3075 		goto out_no_session;
3076 	clp = session->se_client;
3077 
3078 	status = nfserr_too_many_ops;
3079 	if (nfsd4_session_too_many_ops(rqstp, session))
3080 		goto out_put_session;
3081 
3082 	status = nfserr_req_too_big;
3083 	if (nfsd4_request_too_big(rqstp, session))
3084 		goto out_put_session;
3085 
3086 	status = nfserr_badslot;
3087 	if (seq->slotid >= session->se_fchannel.maxreqs)
3088 		goto out_put_session;
3089 
3090 	slot = session->se_slots[seq->slotid];
3091 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
3092 
3093 	/* We do not negotiate the number of slots yet, so set the
3094 	 * maxslots to the session maxreqs, which is used to encode
3095 	 * sr_highest_slotid and the sr_target_slotid to maxslots */
3096 	seq->maxslots = session->se_fchannel.maxreqs;
3097 
3098 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3099 					slot->sl_flags & NFSD4_SLOT_INUSE);
3100 	if (status == nfserr_replay_cache) {
3101 		status = nfserr_seq_misordered;
3102 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3103 			goto out_put_session;
3104 		status = nfserr_seq_false_retry;
3105 		if (!replay_matches_cache(rqstp, seq, slot))
3106 			goto out_put_session;
3107 		cstate->slot = slot;
3108 		cstate->session = session;
3109 		cstate->clp = clp;
3110 		/* Return the cached reply status and set cstate->status
3111 		 * for nfsd4_proc_compound processing */
3112 		status = nfsd4_replay_cache_entry(resp, seq);
3113 		cstate->status = nfserr_replay_cache;
3114 		goto out;
3115 	}
3116 	if (status)
3117 		goto out_put_session;
3118 
3119 	status = nfsd4_sequence_check_conn(conn, session);
3120 	conn = NULL;
3121 	if (status)
3122 		goto out_put_session;
3123 
3124 	buflen = (seq->cachethis) ?
3125 			session->se_fchannel.maxresp_cached :
3126 			session->se_fchannel.maxresp_sz;
3127 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3128 				    nfserr_rep_too_big;
3129 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3130 		goto out_put_session;
3131 	svc_reserve(rqstp, buflen);
3132 
3133 	status = nfs_ok;
3134 	/* Success! bump slot seqid */
3135 	slot->sl_seqid = seq->seqid;
3136 	slot->sl_flags |= NFSD4_SLOT_INUSE;
3137 	if (seq->cachethis)
3138 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3139 	else
3140 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3141 
3142 	cstate->slot = slot;
3143 	cstate->session = session;
3144 	cstate->clp = clp;
3145 
3146 out:
3147 	switch (clp->cl_cb_state) {
3148 	case NFSD4_CB_DOWN:
3149 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3150 		break;
3151 	case NFSD4_CB_FAULT:
3152 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3153 		break;
3154 	default:
3155 		seq->status_flags = 0;
3156 	}
3157 	if (!list_empty(&clp->cl_revoked))
3158 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3159 out_no_session:
3160 	if (conn)
3161 		free_conn(conn);
3162 	spin_unlock(&nn->client_lock);
3163 	return status;
3164 out_put_session:
3165 	nfsd4_put_session_locked(session);
3166 	goto out_no_session;
3167 }
3168 
3169 void
3170 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3171 {
3172 	struct nfsd4_compound_state *cs = &resp->cstate;
3173 
3174 	if (nfsd4_has_session(cs)) {
3175 		if (cs->status != nfserr_replay_cache) {
3176 			nfsd4_store_cache_entry(resp);
3177 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3178 		}
3179 		/* Drop session reference that was taken in nfsd4_sequence() */
3180 		nfsd4_put_session(cs->session);
3181 	} else if (cs->clp)
3182 		put_client_renew(cs->clp);
3183 }
3184 
3185 __be32
3186 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3187 		struct nfsd4_compound_state *cstate,
3188 		union nfsd4_op_u *u)
3189 {
3190 	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3191 	struct nfs4_client *conf, *unconf;
3192 	struct nfs4_client *clp = NULL;
3193 	__be32 status = 0;
3194 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3195 
3196 	spin_lock(&nn->client_lock);
3197 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3198 	conf = find_confirmed_client(&dc->clientid, true, nn);
3199 	WARN_ON_ONCE(conf && unconf);
3200 
3201 	if (conf) {
3202 		if (client_has_state(conf)) {
3203 			status = nfserr_clientid_busy;
3204 			goto out;
3205 		}
3206 		status = mark_client_expired_locked(conf);
3207 		if (status)
3208 			goto out;
3209 		clp = conf;
3210 	} else if (unconf)
3211 		clp = unconf;
3212 	else {
3213 		status = nfserr_stale_clientid;
3214 		goto out;
3215 	}
3216 	if (!nfsd4_mach_creds_match(clp, rqstp)) {
3217 		clp = NULL;
3218 		status = nfserr_wrong_cred;
3219 		goto out;
3220 	}
3221 	unhash_client_locked(clp);
3222 out:
3223 	spin_unlock(&nn->client_lock);
3224 	if (clp)
3225 		expire_client(clp);
3226 	return status;
3227 }
3228 
3229 __be32
3230 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3231 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3232 {
3233 	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3234 	__be32 status = 0;
3235 
3236 	if (rc->rca_one_fs) {
3237 		if (!cstate->current_fh.fh_dentry)
3238 			return nfserr_nofilehandle;
3239 		/*
3240 		 * We don't take advantage of the rca_one_fs case.
3241 		 * That's OK; it's optional, and we can safely ignore it.
3242 		 */
3243 		return nfs_ok;
3244 	}
3245 
3246 	status = nfserr_complete_already;
3247 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3248 			     &cstate->session->se_client->cl_flags))
3249 		goto out;
3250 
3251 	status = nfserr_stale_clientid;
3252 	if (is_client_expired(cstate->session->se_client))
3253 		/*
3254 		 * The following error isn't really legal.
3255 		 * But we only get here if the client has just explicitly
3256 		 * destroyed itself.  Surely it no longer cares what
3257 		 * error it gets back on an operation for the dead
3258 		 * client.
3259 		 */
3260 		goto out;
3261 
3262 	status = nfs_ok;
3263 	nfsd4_client_record_create(cstate->session->se_client);
3264 out:
3265 	return status;
3266 }
3267 
3268 __be32
3269 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3270 		  union nfsd4_op_u *u)
3271 {
3272 	struct nfsd4_setclientid *setclid = &u->setclientid;
3273 	struct xdr_netobj 	clname = setclid->se_name;
3274 	nfs4_verifier		clverifier = setclid->se_verf;
3275 	struct nfs4_client	*conf, *new;
3276 	struct nfs4_client	*unconf = NULL;
3277 	__be32 			status;
3278 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3279 
3280 	new = create_client(clname, rqstp, &clverifier);
3281 	if (new == NULL)
3282 		return nfserr_jukebox;
3283 	/* Cases below refer to rfc 3530 section 14.2.33: */
3284 	spin_lock(&nn->client_lock);
3285 	conf = find_confirmed_client_by_name(&clname, nn);
3286 	if (conf && client_has_state(conf)) {
3287 		/* case 0: */
3288 		status = nfserr_clid_inuse;
3289 		if (clp_used_exchangeid(conf))
3290 			goto out;
3291 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3292 			char addr_str[INET6_ADDRSTRLEN];
3293 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3294 				 sizeof(addr_str));
3295 			dprintk("NFSD: setclientid: string in use by client "
3296 				"at %s\n", addr_str);
3297 			goto out;
3298 		}
3299 	}
3300 	unconf = find_unconfirmed_client_by_name(&clname, nn);
3301 	if (unconf)
3302 		unhash_client_locked(unconf);
3303 	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3304 		/* case 1: probable callback update */
3305 		copy_clid(new, conf);
3306 		gen_confirm(new, nn);
3307 	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
3308 		gen_clid(new, nn);
3309 	new->cl_minorversion = 0;
3310 	gen_callback(new, setclid, rqstp);
3311 	add_to_unconfirmed(new);
3312 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3313 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3314 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3315 	new = NULL;
3316 	status = nfs_ok;
3317 out:
3318 	spin_unlock(&nn->client_lock);
3319 	if (new)
3320 		free_client(new);
3321 	if (unconf)
3322 		expire_client(unconf);
3323 	return status;
3324 }
3325 
3326 
3327 __be32
3328 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3329 			struct nfsd4_compound_state *cstate,
3330 			union nfsd4_op_u *u)
3331 {
3332 	struct nfsd4_setclientid_confirm *setclientid_confirm =
3333 			&u->setclientid_confirm;
3334 	struct nfs4_client *conf, *unconf;
3335 	struct nfs4_client *old = NULL;
3336 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3337 	clientid_t * clid = &setclientid_confirm->sc_clientid;
3338 	__be32 status;
3339 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3340 
3341 	if (STALE_CLIENTID(clid, nn))
3342 		return nfserr_stale_clientid;
3343 
3344 	spin_lock(&nn->client_lock);
3345 	conf = find_confirmed_client(clid, false, nn);
3346 	unconf = find_unconfirmed_client(clid, false, nn);
3347 	/*
3348 	 * We try hard to give out unique clientids, so if we get an
3349 	 * attempt to confirm the same clientid with a different cred,
3350 	 * the client may be buggy; this should never happen.
3351 	 *
3352 	 * Nevertheless, RFC 7530 recommends INUSE for this case:
3353 	 */
3354 	status = nfserr_clid_inuse;
3355 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3356 		goto out;
3357 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3358 		goto out;
3359 	/* cases below refer to rfc 3530 section 14.2.34: */
3360 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3361 		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3362 			/* case 2: probable retransmit */
3363 			status = nfs_ok;
3364 		} else /* case 4: client hasn't noticed we rebooted yet? */
3365 			status = nfserr_stale_clientid;
3366 		goto out;
3367 	}
3368 	status = nfs_ok;
3369 	if (conf) { /* case 1: callback update */
3370 		old = unconf;
3371 		unhash_client_locked(old);
3372 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3373 	} else { /* case 3: normal case; new or rebooted client */
3374 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3375 		if (old) {
3376 			status = nfserr_clid_inuse;
3377 			if (client_has_state(old)
3378 					&& !same_creds(&unconf->cl_cred,
3379 							&old->cl_cred))
3380 				goto out;
3381 			status = mark_client_expired_locked(old);
3382 			if (status) {
3383 				old = NULL;
3384 				goto out;
3385 			}
3386 		}
3387 		move_to_confirmed(unconf);
3388 		conf = unconf;
3389 	}
3390 	get_client_locked(conf);
3391 	spin_unlock(&nn->client_lock);
3392 	nfsd4_probe_callback(conf);
3393 	spin_lock(&nn->client_lock);
3394 	put_client_renew_locked(conf);
3395 out:
3396 	spin_unlock(&nn->client_lock);
3397 	if (old)
3398 		expire_client(old);
3399 	return status;
3400 }
3401 
3402 static struct nfs4_file *nfsd4_alloc_file(void)
3403 {
3404 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
3405 }
3406 
3407 /* OPEN Share state helper functions */
3408 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3409 				struct nfs4_file *fp)
3410 {
3411 	lockdep_assert_held(&state_lock);
3412 
3413 	refcount_set(&fp->fi_ref, 1);
3414 	spin_lock_init(&fp->fi_lock);
3415 	INIT_LIST_HEAD(&fp->fi_stateids);
3416 	INIT_LIST_HEAD(&fp->fi_delegations);
3417 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3418 	fh_copy_shallow(&fp->fi_fhandle, fh);
3419 	fp->fi_deleg_file = NULL;
3420 	fp->fi_had_conflict = false;
3421 	fp->fi_share_deny = 0;
3422 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3423 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
3424 #ifdef CONFIG_NFSD_PNFS
3425 	INIT_LIST_HEAD(&fp->fi_lo_states);
3426 	atomic_set(&fp->fi_lo_recalls, 0);
3427 #endif
3428 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3429 }
3430 
3431 void
3432 nfsd4_free_slabs(void)
3433 {
3434 	kmem_cache_destroy(odstate_slab);
3435 	kmem_cache_destroy(openowner_slab);
3436 	kmem_cache_destroy(lockowner_slab);
3437 	kmem_cache_destroy(file_slab);
3438 	kmem_cache_destroy(stateid_slab);
3439 	kmem_cache_destroy(deleg_slab);
3440 }
3441 
3442 int
3443 nfsd4_init_slabs(void)
3444 {
3445 	openowner_slab = kmem_cache_create("nfsd4_openowners",
3446 			sizeof(struct nfs4_openowner), 0, 0, NULL);
3447 	if (openowner_slab == NULL)
3448 		goto out;
3449 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3450 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
3451 	if (lockowner_slab == NULL)
3452 		goto out_free_openowner_slab;
3453 	file_slab = kmem_cache_create("nfsd4_files",
3454 			sizeof(struct nfs4_file), 0, 0, NULL);
3455 	if (file_slab == NULL)
3456 		goto out_free_lockowner_slab;
3457 	stateid_slab = kmem_cache_create("nfsd4_stateids",
3458 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3459 	if (stateid_slab == NULL)
3460 		goto out_free_file_slab;
3461 	deleg_slab = kmem_cache_create("nfsd4_delegations",
3462 			sizeof(struct nfs4_delegation), 0, 0, NULL);
3463 	if (deleg_slab == NULL)
3464 		goto out_free_stateid_slab;
3465 	odstate_slab = kmem_cache_create("nfsd4_odstate",
3466 			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3467 	if (odstate_slab == NULL)
3468 		goto out_free_deleg_slab;
3469 	return 0;
3470 
3471 out_free_deleg_slab:
3472 	kmem_cache_destroy(deleg_slab);
3473 out_free_stateid_slab:
3474 	kmem_cache_destroy(stateid_slab);
3475 out_free_file_slab:
3476 	kmem_cache_destroy(file_slab);
3477 out_free_lockowner_slab:
3478 	kmem_cache_destroy(lockowner_slab);
3479 out_free_openowner_slab:
3480 	kmem_cache_destroy(openowner_slab);
3481 out:
3482 	dprintk("nfsd4: out of memory while initializing nfsv4\n");
3483 	return -ENOMEM;
3484 }
3485 
3486 static void init_nfs4_replay(struct nfs4_replay *rp)
3487 {
3488 	rp->rp_status = nfserr_serverfault;
3489 	rp->rp_buflen = 0;
3490 	rp->rp_buf = rp->rp_ibuf;
3491 	mutex_init(&rp->rp_mutex);
3492 }
3493 
3494 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3495 		struct nfs4_stateowner *so)
3496 {
3497 	if (!nfsd4_has_session(cstate)) {
3498 		mutex_lock(&so->so_replay.rp_mutex);
3499 		cstate->replay_owner = nfs4_get_stateowner(so);
3500 	}
3501 }
3502 
3503 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3504 {
3505 	struct nfs4_stateowner *so = cstate->replay_owner;
3506 
3507 	if (so != NULL) {
3508 		cstate->replay_owner = NULL;
3509 		mutex_unlock(&so->so_replay.rp_mutex);
3510 		nfs4_put_stateowner(so);
3511 	}
3512 }
3513 
3514 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3515 {
3516 	struct nfs4_stateowner *sop;
3517 
3518 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
3519 	if (!sop)
3520 		return NULL;
3521 
3522 	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3523 	if (!sop->so_owner.data) {
3524 		kmem_cache_free(slab, sop);
3525 		return NULL;
3526 	}
3527 	sop->so_owner.len = owner->len;
3528 
3529 	INIT_LIST_HEAD(&sop->so_stateids);
3530 	sop->so_client = clp;
3531 	init_nfs4_replay(&sop->so_replay);
3532 	atomic_set(&sop->so_count, 1);
3533 	return sop;
3534 }
3535 
3536 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3537 {
3538 	lockdep_assert_held(&clp->cl_lock);
3539 
3540 	list_add(&oo->oo_owner.so_strhash,
3541 		 &clp->cl_ownerstr_hashtbl[strhashval]);
3542 	list_add(&oo->oo_perclient, &clp->cl_openowners);
3543 }
3544 
3545 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3546 {
3547 	unhash_openowner_locked(openowner(so));
3548 }
3549 
3550 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3551 {
3552 	struct nfs4_openowner *oo = openowner(so);
3553 
3554 	kmem_cache_free(openowner_slab, oo);
3555 }
3556 
3557 static const struct nfs4_stateowner_operations openowner_ops = {
3558 	.so_unhash =	nfs4_unhash_openowner,
3559 	.so_free =	nfs4_free_openowner,
3560 };
3561 
3562 static struct nfs4_ol_stateid *
3563 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3564 {
3565 	struct nfs4_ol_stateid *local, *ret = NULL;
3566 	struct nfs4_openowner *oo = open->op_openowner;
3567 
3568 	lockdep_assert_held(&fp->fi_lock);
3569 
3570 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3571 		/* ignore lock owners */
3572 		if (local->st_stateowner->so_is_open_owner == 0)
3573 			continue;
3574 		if (local->st_stateowner != &oo->oo_owner)
3575 			continue;
3576 		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3577 			ret = local;
3578 			refcount_inc(&ret->st_stid.sc_count);
3579 			break;
3580 		}
3581 	}
3582 	return ret;
3583 }
3584 
3585 static __be32
3586 nfsd4_verify_open_stid(struct nfs4_stid *s)
3587 {
3588 	__be32 ret = nfs_ok;
3589 
3590 	switch (s->sc_type) {
3591 	default:
3592 		break;
3593 	case 0:
3594 	case NFS4_CLOSED_STID:
3595 	case NFS4_CLOSED_DELEG_STID:
3596 		ret = nfserr_bad_stateid;
3597 		break;
3598 	case NFS4_REVOKED_DELEG_STID:
3599 		ret = nfserr_deleg_revoked;
3600 	}
3601 	return ret;
3602 }
3603 
3604 /* Lock the stateid st_mutex, and deal with races with CLOSE */
3605 static __be32
3606 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3607 {
3608 	__be32 ret;
3609 
3610 	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
3611 	ret = nfsd4_verify_open_stid(&stp->st_stid);
3612 	if (ret != nfs_ok)
3613 		mutex_unlock(&stp->st_mutex);
3614 	return ret;
3615 }
3616 
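/*
 * Find an existing open stateid for this (openowner, file) pair and take its
 * st_mutex.  Retry if the stateid was closed between the lookup and the lock
 * attempt, since nfsd4_lock_ol_stateid() then refuses to hand it back.
 */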
3617 static struct nfs4_ol_stateid *
3618 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3619 {
3620 	struct nfs4_ol_stateid *stp;
3621 	for (;;) {
3622 		spin_lock(&fp->fi_lock);
3623 		stp = nfsd4_find_existing_open(fp, open);
3624 		spin_unlock(&fp->fi_lock);
3625 		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3626 			break;
3627 		nfs4_put_stid(&stp->st_stid);
3628 	}
3629 	return stp;
3630 }
3631 
3632 static struct nfs4_openowner *
3633 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3634 			   struct nfsd4_compound_state *cstate)
3635 {
3636 	struct nfs4_client *clp = cstate->clp;
3637 	struct nfs4_openowner *oo, *ret;
3638 
3639 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3640 	if (!oo)
3641 		return NULL;
3642 	oo->oo_owner.so_ops = &openowner_ops;
3643 	oo->oo_owner.so_is_open_owner = 1;
3644 	oo->oo_owner.so_seqid = open->op_seqid;
3645 	oo->oo_flags = 0;
3646 	if (nfsd4_has_session(cstate))
3647 		oo->oo_flags |= NFS4_OO_CONFIRMED;
3648 	oo->oo_time = 0;
3649 	oo->oo_last_closed_stid = NULL;
3650 	INIT_LIST_HEAD(&oo->oo_close_lru);
3651 	spin_lock(&clp->cl_lock);
3652 	ret = find_openstateowner_str_locked(strhashval, open, clp);
3653 	if (ret == NULL) {
3654 		hash_openowner(oo, clp, strhashval);
3655 		ret = oo;
3656 	} else
3657 		nfs4_free_stateowner(&oo->oo_owner);
3658 
3659 	spin_unlock(&clp->cl_lock);
3660 	return ret;
3661 }
3662 
3663 static struct nfs4_ol_stateid *
3664 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3665 {
3666 
3667 	struct nfs4_openowner *oo = open->op_openowner;
3668 	struct nfs4_ol_stateid *retstp = NULL;
3669 	struct nfs4_ol_stateid *stp;
3670 
3671 	stp = open->op_stp;
3672 	/* Initialize and take st_mutex before the spinlocks below: mutex_lock() may sleep */
3673 	mutex_init(&stp->st_mutex);
3674 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
3675 
3676 retry:
3677 	spin_lock(&oo->oo_owner.so_client->cl_lock);
3678 	spin_lock(&fp->fi_lock);
3679 
3680 	retstp = nfsd4_find_existing_open(fp, open);
3681 	if (retstp)
3682 		goto out_unlock;
3683 
3684 	open->op_stp = NULL;
3685 	refcount_inc(&stp->st_stid.sc_count);
3686 	stp->st_stid.sc_type = NFS4_OPEN_STID;
3687 	INIT_LIST_HEAD(&stp->st_locks);
3688 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3689 	get_nfs4_file(fp);
3690 	stp->st_stid.sc_file = fp;
3691 	stp->st_access_bmap = 0;
3692 	stp->st_deny_bmap = 0;
3693 	stp->st_openstp = NULL;
3694 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3695 	list_add(&stp->st_perfile, &fp->fi_stateids);
3696 
3697 out_unlock:
3698 	spin_unlock(&fp->fi_lock);
3699 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
3700 	if (retstp) {
3701 		/* Handle races with CLOSE */
3702 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3703 			nfs4_put_stid(&retstp->st_stid);
3704 			goto retry;
3705 		}
3706 		/* To keep mutex tracking happy */
3707 		mutex_unlock(&stp->st_mutex);
3708 		stp = retstp;
3709 	}
3710 	return stp;
3711 }
3712 
3713 /*
3714  * In the 4.0 case we need to keep the owners around a little while to handle
3715  * CLOSE replay. We still need to release any file access held by them
3716  * before returning, however.
3717  */
3718 static void
3719 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3720 {
3721 	struct nfs4_ol_stateid *last;
3722 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
3723 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3724 						nfsd_net_id);
3725 
3726 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3727 
3728 	/*
3729 	 * We know that we hold one reference via nfsd4_close, and another
3730 	 * "persistent" reference for the client. If the refcount is higher
3731 	 * than 2, then there are still calls in progress that are using this
3732 	 * stateid. We can't put the sc_file reference until they are finished.
3733 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
3734 	 * there should be no danger of the refcount going back up again at
3735 	 * this point.
3736 	 */
3737 	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
3738 
3739 	release_all_access(s);
3740 	if (s->st_stid.sc_file) {
3741 		put_nfs4_file(s->st_stid.sc_file);
3742 		s->st_stid.sc_file = NULL;
3743 	}
3744 
3745 	spin_lock(&nn->client_lock);
3746 	last = oo->oo_last_closed_stid;
3747 	oo->oo_last_closed_stid = s;
3748 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3749 	oo->oo_time = get_seconds();
3750 	spin_unlock(&nn->client_lock);
3751 	if (last)
3752 		nfs4_put_stid(&last->st_stid);
3753 }
3754 
3755 /* search file_hashtbl[] for file */
3756 static struct nfs4_file *
3757 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3758 {
3759 	struct nfs4_file *fp;
3760 
3761 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3762 		if (fh_match(&fp->fi_fhandle, fh)) {
3763 			if (refcount_inc_not_zero(&fp->fi_ref))
3764 				return fp;
3765 		}
3766 	}
3767 	return NULL;
3768 }
3769 
3770 struct nfs4_file *
3771 find_file(struct knfsd_fh *fh)
3772 {
3773 	struct nfs4_file *fp;
3774 	unsigned int hashval = file_hashval(fh);
3775 
3776 	rcu_read_lock();
3777 	fp = find_file_locked(fh, hashval);
3778 	rcu_read_unlock();
3779 	return fp;
3780 }
3781 
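/*
 * Look up the nfs4_file for this filehandle, first under RCU and then under
 * the state_lock; if it is still missing, insert the caller's preallocated
 * 'new' entry.  Callers can tell whether 'new' was used by comparing it with
 * the return value.
 */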
3782 static struct nfs4_file *
3783 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3784 {
3785 	struct nfs4_file *fp;
3786 	unsigned int hashval = file_hashval(fh);
3787 
3788 	rcu_read_lock();
3789 	fp = find_file_locked(fh, hashval);
3790 	rcu_read_unlock();
3791 	if (fp)
3792 		return fp;
3793 
3794 	spin_lock(&state_lock);
3795 	fp = find_file_locked(fh, hashval);
3796 	if (likely(fp == NULL)) {
3797 		nfsd4_init_file(fh, hashval, new);
3798 		fp = new;
3799 	}
3800 	spin_unlock(&state_lock);
3801 
3802 	return fp;
3803 }
3804 
3805 /*
3806  * Called to check for share-deny conflicts when a READ uses the all-zero
3807  * stateid, or a WRITE uses the all-zero or all-ones stateid
3808  */
3809 static __be32
3810 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3811 {
3812 	struct nfs4_file *fp;
3813 	__be32 ret = nfs_ok;
3814 
3815 	fp = find_file(&current_fh->fh_handle);
3816 	if (!fp)
3817 		return ret;
3818 	/* Check for conflicting share reservations */
3819 	spin_lock(&fp->fi_lock);
3820 	if (fp->fi_share_deny & deny_type)
3821 		ret = nfserr_locked;
3822 	spin_unlock(&fp->fi_lock);
3823 	put_nfs4_file(fp);
3824 	return ret;
3825 }
3826 
3827 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3828 {
3829 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3830 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3831 					  nfsd_net_id);
3832 
3833 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3834 
3835 	/*
3836 	 * We can't do this in nfsd_break_deleg_cb because it is
3837 	 * already holding inode->i_lock.
3838 	 *
3839 	 * If the dl_time != 0, then we know that it has already been
3840 	 * queued for a lease break. Don't queue it again.
3841 	 */
3842 	spin_lock(&state_lock);
3843 	if (dp->dl_time == 0) {
3844 		dp->dl_time = get_seconds();
3845 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3846 	}
3847 	spin_unlock(&state_lock);
3848 }
3849 
3850 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3851 		struct rpc_task *task)
3852 {
3853 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3854 
3855 	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3856 		return 1;
3857 
3858 	switch (task->tk_status) {
3859 	case 0:
3860 		return 1;
3861 	case -EBADHANDLE:
3862 	case -NFS4ERR_BAD_STATEID:
3863 		/*
3864 		 * Race: client probably got cb_recall before open reply
3865 		 * granting delegation.
3866 		 */
3867 		if (dp->dl_retries--) {
3868 			rpc_delay(task, 2 * HZ);
3869 			return 0;
3870 		}
3871 		/*FALLTHRU*/
3872 	default:
3873 		return -1;
3874 	}
3875 }
3876 
3877 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3878 {
3879 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3880 
3881 	nfs4_put_stid(&dp->dl_stid);
3882 }
3883 
3884 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3885 	.prepare	= nfsd4_cb_recall_prepare,
3886 	.done		= nfsd4_cb_recall_done,
3887 	.release	= nfsd4_cb_recall_release,
3888 };
3889 
3890 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3891 {
3892 	/*
3893 	 * We're assuming the state code never drops its reference
3894 	 * without first removing the lease.  Since we're in this lease
3895 	 * callback (and since the lease code is serialized by the kernel
3896 	 * lock) we know the server hasn't removed the lease yet, so it's
3897 	 * safe to take a reference.
3898 	 */
3899 	refcount_inc(&dp->dl_stid.sc_count);
3900 	nfsd4_run_cb(&dp->dl_recall);
3901 }
3902 
3903 /* Called from break_lease() with i_lock held. */
3904 static bool
3905 nfsd_break_deleg_cb(struct file_lock *fl)
3906 {
3907 	bool ret = false;
3908 	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3909 	struct nfs4_delegation *dp;
3910 
3911 	if (!fp) {
3912 		WARN(1, "(%p)->fl_owner NULL\n", fl);
3913 		return ret;
3914 	}
3915 	if (fp->fi_had_conflict) {
3916 		WARN(1, "duplicate break on %p\n", fp);
3917 		return ret;
3918 	}
3919 	/*
3920 	 * We don't want the locks code to time out the lease for us;
3921 	 * we'll remove it ourselves if a delegation isn't returned
3922 	 * in time:
3923 	 */
3924 	fl->fl_break_time = 0;
3925 
3926 	spin_lock(&fp->fi_lock);
3927 	fp->fi_had_conflict = true;
3928 	/*
3929 	 * If there are no delegations on the list, then return true
3930 	 * so that the lease code will go ahead and delete it.
3931 	 */
3932 	if (list_empty(&fp->fi_delegations))
3933 		ret = true;
3934 	else
3935 		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3936 			nfsd_break_one_deleg(dp);
3937 	spin_unlock(&fp->fi_lock);
3938 	return ret;
3939 }
3940 
3941 static int
3942 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3943 		     struct list_head *dispose)
3944 {
3945 	if (arg & F_UNLCK)
3946 		return lease_modify(onlist, arg, dispose);
3947 	else
3948 		return -EAGAIN;
3949 }
3950 
3951 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3952 	.lm_break = nfsd_break_deleg_cb,
3953 	.lm_change = nfsd_change_deleg_cb,
3954 };
3955 
3956 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3957 {
3958 	if (nfsd4_has_session(cstate))
3959 		return nfs_ok;
3960 	if (seqid == so->so_seqid - 1)
3961 		return nfserr_replay_me;
3962 	if (seqid == so->so_seqid)
3963 		return nfs_ok;
3964 	return nfserr_bad_seqid;
3965 }
3966 
3967 static __be32 lookup_clientid(clientid_t *clid,
3968 		struct nfsd4_compound_state *cstate,
3969 		struct nfsd_net *nn)
3970 {
3971 	struct nfs4_client *found;
3972 
3973 	if (cstate->clp) {
3974 		found = cstate->clp;
3975 		if (!same_clid(&found->cl_clientid, clid))
3976 			return nfserr_stale_clientid;
3977 		return nfs_ok;
3978 	}
3979 
3980 	if (STALE_CLIENTID(clid, nn))
3981 		return nfserr_stale_clientid;
3982 
3983 	/*
3984 	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3985 	 * cached already then we know this is for v4.0 and "sessions"
3986 	 * will be false.
3987 	 */
3988 	WARN_ON_ONCE(cstate->session);
3989 	spin_lock(&nn->client_lock);
3990 	found = find_confirmed_client(clid, false, nn);
3991 	if (!found) {
3992 		spin_unlock(&nn->client_lock);
3993 		return nfserr_expired;
3994 	}
3995 	atomic_inc(&found->cl_refcount);
3996 	spin_unlock(&nn->client_lock);
3997 
3998 	/* Cache the nfs4_client in cstate! */
3999 	cstate->clp = found;
4000 	return nfs_ok;
4001 }
4002 
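/*
 * First phase of OPEN processing: validate the clientid, preallocate the
 * nfs4_file, find or create the openowner, and preallocate the open stateid
 * (and a clnt_odstate on pNFS exports).  Allocation failures return
 * nfserr_jukebox so the client will retry.
 */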
4003 __be32
4004 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4005 		    struct nfsd4_open *open, struct nfsd_net *nn)
4006 {
4007 	clientid_t *clientid = &open->op_clientid;
4008 	struct nfs4_client *clp = NULL;
4009 	unsigned int strhashval;
4010 	struct nfs4_openowner *oo = NULL;
4011 	__be32 status;
4012 
4013 	if (STALE_CLIENTID(&open->op_clientid, nn))
4014 		return nfserr_stale_clientid;
4015 	/*
4016 	 * In case we need it later, after we've already created the
4017 	 * file and don't want to risk a further failure:
4018 	 */
4019 	open->op_file = nfsd4_alloc_file();
4020 	if (open->op_file == NULL)
4021 		return nfserr_jukebox;
4022 
4023 	status = lookup_clientid(clientid, cstate, nn);
4024 	if (status)
4025 		return status;
4026 	clp = cstate->clp;
4027 
4028 	strhashval = ownerstr_hashval(&open->op_owner);
4029 	oo = find_openstateowner_str(strhashval, open, clp);
4030 	open->op_openowner = oo;
4031 	if (!oo)
4032 		goto new_owner;
4034 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4035 		/* Replace unconfirmed owners without checking for replay. */
4036 		release_openowner(oo);
4037 		open->op_openowner = NULL;
4038 		goto new_owner;
4039 	}
4040 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4041 	if (status)
4042 		return status;
4043 	goto alloc_stateid;
4044 new_owner:
4045 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
4046 	if (oo == NULL)
4047 		return nfserr_jukebox;
4048 	open->op_openowner = oo;
4049 alloc_stateid:
4050 	open->op_stp = nfs4_alloc_open_stateid(clp);
4051 	if (!open->op_stp)
4052 		return nfserr_jukebox;
4053 
4054 	if (nfsd4_has_session(cstate) &&
4055 	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4056 		open->op_odstate = alloc_clnt_odstate(clp);
4057 		if (!open->op_odstate)
4058 			return nfserr_jukebox;
4059 	}
4060 
4061 	return nfs_ok;
4062 }
4063 
4064 static inline __be32
4065 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4066 {
4067 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4068 		return nfserr_openmode;
4069 	else
4070 		return nfs_ok;
4071 }
4072 
4073 static int share_access_to_flags(u32 share_access)
4074 {
4075 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4076 }
4077 
4078 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4079 {
4080 	struct nfs4_stid *ret;
4081 
4082 	ret = find_stateid_by_type(cl, s,
4083 				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4084 	if (!ret)
4085 		return NULL;
4086 	return delegstateid(ret);
4087 }
4088 
4089 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4090 {
4091 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4092 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4093 }
4094 
4095 static __be32
4096 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4097 		struct nfs4_delegation **dp)
4098 {
4099 	int flags;
4100 	__be32 status = nfserr_bad_stateid;
4101 	struct nfs4_delegation *deleg;
4102 
4103 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4104 	if (deleg == NULL)
4105 		goto out;
4106 	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4107 		nfs4_put_stid(&deleg->dl_stid);
4108 		if (cl->cl_minorversion)
4109 			status = nfserr_deleg_revoked;
4110 		goto out;
4111 	}
4112 	flags = share_access_to_flags(open->op_share_access);
4113 	status = nfs4_check_delegmode(deleg, flags);
4114 	if (status) {
4115 		nfs4_put_stid(&deleg->dl_stid);
4116 		goto out;
4117 	}
4118 	*dp = deleg;
4119 out:
4120 	if (!nfsd4_is_deleg_cur(open))
4121 		return nfs_ok;
4122 	if (status)
4123 		return status;
4124 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4125 	return nfs_ok;
4126 }
4127 
4128 static inline int nfs4_access_to_access(u32 nfs4_access)
4129 {
4130 	int flags = 0;
4131 
4132 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4133 		flags |= NFSD_MAY_READ;
4134 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4135 		flags |= NFSD_MAY_WRITE;
4136 	return flags;
4137 }
4138 
4139 static inline __be32
4140 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4141 		struct nfsd4_open *open)
4142 {
4143 	struct iattr iattr = {
4144 		.ia_valid = ATTR_SIZE,
4145 		.ia_size = 0,
4146 	};
4147 	if (!open->op_truncate)
4148 		return 0;
4149 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4150 		return nfserr_inval;
4151 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4152 }
4153 
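/*
 * Check the requested deny and access modes against the file's current
 * state, record the new access/deny bits in the stateid, open a struct file
 * for the requested access if one isn't already cached in fp->fi_fds[], and
 * apply any requested truncate.  Failures after the access has been granted
 * roll back the stateid's access and deny bits and drop the file access
 * reference.
 */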
4154 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4155 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4156 		struct nfsd4_open *open)
4157 {
4158 	struct file *filp = NULL;
4159 	__be32 status;
4160 	int oflag = nfs4_access_to_omode(open->op_share_access);
4161 	int access = nfs4_access_to_access(open->op_share_access);
4162 	unsigned char old_access_bmap, old_deny_bmap;
4163 
4164 	spin_lock(&fp->fi_lock);
4165 
4166 	/*
4167 	 * Are we trying to set a deny mode that would conflict with
4168 	 * current access?
4169 	 */
4170 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4171 	if (status != nfs_ok) {
4172 		spin_unlock(&fp->fi_lock);
4173 		goto out;
4174 	}
4175 
4176 	/* set access to the file */
4177 	status = nfs4_file_get_access(fp, open->op_share_access);
4178 	if (status != nfs_ok) {
4179 		spin_unlock(&fp->fi_lock);
4180 		goto out;
4181 	}
4182 
4183 	/* Set access bits in stateid */
4184 	old_access_bmap = stp->st_access_bmap;
4185 	set_access(open->op_share_access, stp);
4186 
4187 	/* Set new deny mask */
4188 	old_deny_bmap = stp->st_deny_bmap;
4189 	set_deny(open->op_share_deny, stp);
4190 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4191 
4192 	if (!fp->fi_fds[oflag]) {
4193 		spin_unlock(&fp->fi_lock);
4194 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4195 		if (status)
4196 			goto out_put_access;
4197 		spin_lock(&fp->fi_lock);
4198 		if (!fp->fi_fds[oflag]) {
4199 			fp->fi_fds[oflag] = filp;
4200 			filp = NULL;
4201 		}
4202 	}
4203 	spin_unlock(&fp->fi_lock);
4204 	if (filp)
4205 		fput(filp);
4206 
4207 	status = nfsd4_truncate(rqstp, cur_fh, open);
4208 	if (status)
4209 		goto out_put_access;
4210 out:
4211 	return status;
4212 out_put_access:
4213 	stp->st_access_bmap = old_access_bmap;
4214 	nfs4_file_put_access(fp, open->op_share_access);
4215 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4216 	goto out;
4217 }
4218 
4219 static __be32
4220 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4221 {
4222 	__be32 status;
4223 	unsigned char old_deny_bmap = stp->st_deny_bmap;
4224 
4225 	if (!test_access(open->op_share_access, stp))
4226 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4227 
4228 	/* test and set deny mode */
4229 	spin_lock(&fp->fi_lock);
4230 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4231 	if (status == nfs_ok) {
4232 		set_deny(open->op_share_deny, stp);
4233 		fp->fi_share_deny |=
4234 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4235 	}
4236 	spin_unlock(&fp->fi_lock);
4237 
4238 	if (status != nfs_ok)
4239 		return status;
4240 
4241 	status = nfsd4_truncate(rqstp, cur_fh, open);
4242 	if (status != nfs_ok)
4243 		reset_union_bmap_deny(old_deny_bmap, stp);
4244 	return status;
4245 }
4246 
4247 /* Should we give out recallable state? */
4248 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4249 {
4250 	if (clp->cl_cb_state == NFSD4_CB_UP)
4251 		return true;
4252 	/*
4253 	 * In the sessions case, since we don't have to establish a
4254 	 * separate connection for callbacks, we assume it's OK
4255 	 * until we hear otherwise:
4256 	 */
4257 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4258 }
4259 
4260 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4261 {
4262 	struct file_lock *fl;
4263 
4264 	fl = locks_alloc_lock();
4265 	if (!fl)
4266 		return NULL;
4267 	fl->fl_lmops = &nfsd_lease_mng_ops;
4268 	fl->fl_flags = FL_DELEG;
4269 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4270 	fl->fl_end = OFFSET_MAX;
4271 	fl->fl_owner = (fl_owner_t)fp;
4272 	fl->fl_pid = current->tgid;
4273 	return fl;
4274 }
4275 
4276 /**
4277  * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4278  * @dp:   a pointer to the nfs4_delegation we're adding.
4279  *
4280  * Return:
4281  *      On success: 0.
4282  *
4283  *      On error: -EAGAIN if there was an existing delegation;
4284  *                another nonzero error code on other failures.
4285  *
4286  */
4287 
4288 static int nfs4_setlease(struct nfs4_delegation *dp)
4289 {
4290 	struct nfs4_file *fp = dp->dl_stid.sc_file;
4291 	struct file_lock *fl;
4292 	struct file *filp;
4293 	int status = 0;
4294 
4295 	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
4296 	if (!fl)
4297 		return -ENOMEM;
4298 	filp = find_readable_file(fp);
4299 	if (!filp) {
4300 		/* We should always have a readable file here */
4301 		WARN_ON_ONCE(1);
4302 		locks_free_lock(fl);
4303 		return -EBADF;
4304 	}
4305 	fl->fl_file = filp;
4306 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
4307 	if (fl)
4308 		locks_free_lock(fl);
4309 	if (status)
4310 		goto out_fput;
4311 	spin_lock(&state_lock);
4312 	spin_lock(&fp->fi_lock);
4313 	/* Did the lease get broken before we took the lock? */
4314 	status = -EAGAIN;
4315 	if (fp->fi_had_conflict)
4316 		goto out_unlock;
4317 	/* Race breaker */
4318 	if (fp->fi_deleg_file) {
4319 		status = hash_delegation_locked(dp, fp);
4320 		goto out_unlock;
4321 	}
4322 	fp->fi_deleg_file = filp;
4323 	fp->fi_delegees = 0;
4324 	status = hash_delegation_locked(dp, fp);
4325 	spin_unlock(&fp->fi_lock);
4326 	spin_unlock(&state_lock);
4327 	if (status) {
4328 		/* Should never happen, this is a new fi_deleg_file  */
4329 		WARN_ON_ONCE(1);
4330 		goto out_fput;
4331 	}
4332 	return 0;
4333 out_unlock:
4334 	spin_unlock(&fp->fi_lock);
4335 	spin_unlock(&state_lock);
4336 out_fput:
4337 	fput(filp);
4338 	return status;
4339 }
4340 
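/*
 * Set up a read delegation for this client and file: bail out if the file
 * has already seen a conflicting open (-EAGAIN) or if
 * nfs4_get_existing_delegation() refuses, then either install a new lease
 * via nfs4_setlease() or hash the delegation onto the existing fi_deleg_file.
 */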
4341 static struct nfs4_delegation *
4342 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4343 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4344 {
4345 	int status;
4346 	struct nfs4_delegation *dp;
4347 
4348 	if (fp->fi_had_conflict)
4349 		return ERR_PTR(-EAGAIN);
4350 
4351 	spin_lock(&state_lock);
4352 	spin_lock(&fp->fi_lock);
4353 	status = nfs4_get_existing_delegation(clp, fp);
4354 	spin_unlock(&fp->fi_lock);
4355 	spin_unlock(&state_lock);
4356 
4357 	if (status)
4358 		return ERR_PTR(status);
4359 
4360 	dp = alloc_init_deleg(clp, fh, odstate);
4361 	if (!dp)
4362 		return ERR_PTR(-ENOMEM);
4363 
4364 	get_nfs4_file(fp);
4365 	spin_lock(&state_lock);
4366 	spin_lock(&fp->fi_lock);
4367 	dp->dl_stid.sc_file = fp;
4368 	if (!fp->fi_deleg_file) {
4369 		spin_unlock(&fp->fi_lock);
4370 		spin_unlock(&state_lock);
4371 		status = nfs4_setlease(dp);
4372 		goto out;
4373 	}
4374 	if (fp->fi_had_conflict) {
4375 		status = -EAGAIN;
4376 		goto out_unlock;
4377 	}
4378 	status = hash_delegation_locked(dp, fp);
4379 out_unlock:
4380 	spin_unlock(&fp->fi_lock);
4381 	spin_unlock(&state_lock);
4382 out:
4383 	if (status) {
4384 		put_clnt_odstate(dp->dl_clnt_odstate);
4385 		nfs4_put_stid(&dp->dl_stid);
4386 		return ERR_PTR(status);
4387 	}
4388 	return dp;
4389 }
4390 
4391 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4392 {
4393 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4394 	if (status == -EAGAIN)
4395 		open->op_why_no_deleg = WND4_CONTENTION;
4396 	else {
4397 		open->op_why_no_deleg = WND4_RESOURCE;
4398 		switch (open->op_deleg_want) {
4399 		case NFS4_SHARE_WANT_READ_DELEG:
4400 		case NFS4_SHARE_WANT_WRITE_DELEG:
4401 		case NFS4_SHARE_WANT_ANY_DELEG:
4402 			break;
4403 		case NFS4_SHARE_WANT_CANCEL:
4404 			open->op_why_no_deleg = WND4_CANCELLED;
4405 			break;
4406 		case NFS4_SHARE_WANT_NO_DELEG:
4407 			WARN_ON_ONCE(1);
4408 		}
4409 	}
4410 }
4411 
4412 /*
4413  * Attempt to hand out a delegation.
4414  *
4415  * Note we don't support write delegations, and won't until the vfs has
4416  * proper support for them.
4417  */
4418 static void
4419 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4420 			struct nfs4_ol_stateid *stp)
4421 {
4422 	struct nfs4_delegation *dp;
4423 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4424 	struct nfs4_client *clp = stp->st_stid.sc_client;
4425 	int cb_up;
4426 	int status = 0;
4427 
4428 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4429 	open->op_recall = 0;
4430 	switch (open->op_claim_type) {
4431 		case NFS4_OPEN_CLAIM_PREVIOUS:
4432 			if (!cb_up)
4433 				open->op_recall = 1;
4434 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4435 				goto out_no_deleg;
4436 			break;
4437 		case NFS4_OPEN_CLAIM_NULL:
4438 		case NFS4_OPEN_CLAIM_FH:
4439 			/*
4440 			 * Let's not give out any delegations till everyone's
4441 			 * had the chance to reclaim theirs, *and* until
4442 			 * NLM locks have all been reclaimed:
4443 			 */
4444 			if (locks_in_grace(clp->net))
4445 				goto out_no_deleg;
4446 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4447 				goto out_no_deleg;
4448 			/*
4449 			 * Also, if the file was opened for write or
4450 			 * create, there's a good chance the client's
4451 			 * about to write to it, resulting in an
4452 			 * immediate recall (since we don't support
4453 			 * write delegations):
4454 			 */
4455 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4456 				goto out_no_deleg;
4457 			if (open->op_create == NFS4_OPEN_CREATE)
4458 				goto out_no_deleg;
4459 			break;
4460 		default:
4461 			goto out_no_deleg;
4462 	}
4463 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4464 	if (IS_ERR(dp))
4465 		goto out_no_deleg;
4466 
4467 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4468 
4469 	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4470 		STATEID_VAL(&dp->dl_stid.sc_stateid));
4471 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4472 	nfs4_put_stid(&dp->dl_stid);
4473 	return;
4474 out_no_deleg:
4475 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4476 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4477 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4478 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4479 		open->op_recall = 1;
4480 	}
4481 
4482 	/* 4.1 client asking for a delegation? */
4483 	if (open->op_deleg_want)
4484 		nfsd4_open_deleg_none_ext(open, status);
4485 	return;
4486 }
4487 
4488 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4489 					struct nfs4_delegation *dp)
4490 {
4491 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4492 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4493 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4494 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4495 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4496 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4497 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4498 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4499 	}
4500 	/* Otherwise the client must be confused wanting a delegation
4501 	 * it already has, therefore we don't return
4502 	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
4503 	 */
4504 }
4505 
4506 __be32
4507 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4508 {
4509 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
4510 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4511 	struct nfs4_file *fp = NULL;
4512 	struct nfs4_ol_stateid *stp = NULL;
4513 	struct nfs4_delegation *dp = NULL;
4514 	__be32 status;
4515 	bool new_stp = false;
4516 
4517 	/*
4518 	 * Lookup file; if found, lookup stateid and check open request,
4519 	 * and check for delegations in the process of being recalled.
4520 	 * If not found, create the nfs4_file struct
4521 	 */
4522 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4523 	if (fp != open->op_file) {
4524 		status = nfs4_check_deleg(cl, open, &dp);
4525 		if (status)
4526 			goto out;
4527 		stp = nfsd4_find_and_lock_existing_open(fp, open);
4528 	} else {
4529 		open->op_file = NULL;
4530 		status = nfserr_bad_stateid;
4531 		if (nfsd4_is_deleg_cur(open))
4532 			goto out;
4533 	}
4534 
4535 	if (!stp) {
4536 		stp = init_open_stateid(fp, open);
4537 		if (!open->op_stp)
4538 			new_stp = true;
4539 	}
4540 
4541 	/*
4542 	 * OPEN the file, or upgrade an existing OPEN.
4543 	 * If truncate fails, the OPEN fails.
4544 	 *
4545 	 * stp is already locked.
4546 	 */
4547 	if (!new_stp) {
4548 		/* Stateid was found, this is an OPEN upgrade */
4549 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4550 		if (status) {
4551 			mutex_unlock(&stp->st_mutex);
4552 			goto out;
4553 		}
4554 	} else {
4555 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4556 		if (status) {
4557 			stp->st_stid.sc_type = NFS4_CLOSED_STID;
4558 			release_open_stateid(stp);
4559 			mutex_unlock(&stp->st_mutex);
4560 			goto out;
4561 		}
4562 
4563 		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4564 							open->op_odstate);
4565 		if (stp->st_clnt_odstate == open->op_odstate)
4566 			open->op_odstate = NULL;
4567 	}
4568 
4569 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4570 	mutex_unlock(&stp->st_mutex);
4571 
4572 	if (nfsd4_has_session(&resp->cstate)) {
4573 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4574 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4575 			open->op_why_no_deleg = WND4_NOT_WANTED;
4576 			goto nodeleg;
4577 		}
4578 	}
4579 
4580 	/*
4581 	 * Attempt to hand out a delegation. No error return, because the
4582 	 * OPEN succeeds even if we fail.
4583 	 */
4584 	nfs4_open_delegation(current_fh, open, stp);
4585 nodeleg:
4586 	status = nfs_ok;
4587 
4588 	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4589 		STATEID_VAL(&stp->st_stid.sc_stateid));
4590 out:
4591 	/* 4.1 client trying to upgrade/downgrade delegation? */
4592 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4593 	    open->op_deleg_want)
4594 		nfsd4_deleg_xgrade_none_ext(open, dp);
4595 
4596 	if (fp)
4597 		put_nfs4_file(fp);
4598 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4599 		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4600 	/*
4601 	 * To finish the open response, we just need to set the rflags.
4602 	 */
4603 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4604 	if (nfsd4_has_session(&resp->cstate))
4605 		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4606 	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4607 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4608 
4609 	if (dp)
4610 		nfs4_put_stid(&dp->dl_stid);
4611 	if (stp)
4612 		nfs4_put_stid(&stp->st_stid);
4613 
4614 	return status;
4615 }
4616 
4617 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4618 			      struct nfsd4_open *open)
4619 {
4620 	if (open->op_openowner) {
4621 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4622 
4623 		nfsd4_cstate_assign_replay(cstate, so);
4624 		nfs4_put_stateowner(so);
4625 	}
4626 	if (open->op_file)
4627 		kmem_cache_free(file_slab, open->op_file);
4628 	if (open->op_stp)
4629 		nfs4_put_stid(&open->op_stp->st_stid);
4630 	if (open->op_odstate)
4631 		kmem_cache_free(odstate_slab, open->op_odstate);
4632 }
4633 
4634 __be32
4635 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4636 	    union nfsd4_op_u *u)
4637 {
4638 	clientid_t *clid = &u->renew;
4639 	struct nfs4_client *clp;
4640 	__be32 status;
4641 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4642 
4643 	dprintk("process_renew(%08x/%08x): starting\n",
4644 			clid->cl_boot, clid->cl_id);
4645 	status = lookup_clientid(clid, cstate, nn);
4646 	if (status)
4647 		goto out;
4648 	clp = cstate->clp;
4649 	status = nfserr_cb_path_down;
4650 	if (!list_empty(&clp->cl_delegations)
4651 			&& clp->cl_cb_state != NFSD4_CB_UP)
4652 		goto out;
4653 	status = nfs_ok;
4654 out:
4655 	return status;
4656 }
4657 
4658 void
4659 nfsd4_end_grace(struct nfsd_net *nn)
4660 {
4661 	/* do nothing if grace period already ended */
4662 	if (nn->grace_ended)
4663 		return;
4664 
4665 	dprintk("NFSD: end of grace period\n");
4666 	nn->grace_ended = true;
4667 	/*
4668 	 * If the server goes down again right now, an NFSv4
4669 	 * client will still be allowed to reclaim after it comes back up,
4670 	 * even if it hasn't yet had a chance to reclaim state this time.
4671 	 *
4672 	 */
4673 	nfsd4_record_grace_done(nn);
4674 	/*
4675 	 * At this point, NFSv4 clients can still reclaim.  But if the
4676 	 * server crashes, any that have not yet reclaimed will be out
4677 	 * of luck on the next boot.
4678 	 *
4679 	 * (NFSv4.1+ clients are considered to have reclaimed once they
4680 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
4681 	 * have reclaimed after their first OPEN.)
4682 	 */
4683 	locks_end_grace(&nn->nfsd4_manager);
4684 	/*
4685 	 * At this point, and once lockd and/or any other containers
4686 	 * exit their grace period, further reclaims will fail and
4687 	 * regular locking can resume.
4688 	 */
4689 }
4690 
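/*
 * Reap state that has gone unused for a full lease period: expired clients,
 * delegations queued for recall, last-closed stateids of v4.0 openowners,
 * and blocked-lock requests that were never revisited.  Returns the number
 * of seconds until the laundromat should run again.
 */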
4691 static time_t
4692 nfs4_laundromat(struct nfsd_net *nn)
4693 {
4694 	struct nfs4_client *clp;
4695 	struct nfs4_openowner *oo;
4696 	struct nfs4_delegation *dp;
4697 	struct nfs4_ol_stateid *stp;
4698 	struct nfsd4_blocked_lock *nbl;
4699 	struct list_head *pos, *next, reaplist;
4700 	time_t cutoff = get_seconds() - nn->nfsd4_lease;
4701 	time_t t, new_timeo = nn->nfsd4_lease;
4702 
4703 	dprintk("NFSD: laundromat service - starting\n");
4704 	nfsd4_end_grace(nn);
4705 	INIT_LIST_HEAD(&reaplist);
4706 	spin_lock(&nn->client_lock);
4707 	list_for_each_safe(pos, next, &nn->client_lru) {
4708 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4709 		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4710 			t = clp->cl_time - cutoff;
4711 			new_timeo = min(new_timeo, t);
4712 			break;
4713 		}
4714 		if (mark_client_expired_locked(clp)) {
4715 			dprintk("NFSD: client in use (clientid %08x)\n",
4716 				clp->cl_clientid.cl_id);
4717 			continue;
4718 		}
4719 		list_add(&clp->cl_lru, &reaplist);
4720 	}
4721 	spin_unlock(&nn->client_lock);
4722 	list_for_each_safe(pos, next, &reaplist) {
4723 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4724 		dprintk("NFSD: purging unused client (clientid %08x)\n",
4725 			clp->cl_clientid.cl_id);
4726 		list_del_init(&clp->cl_lru);
4727 		expire_client(clp);
4728 	}
4729 	spin_lock(&state_lock);
4730 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
4731 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4732 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4733 			t = dp->dl_time - cutoff;
4734 			new_timeo = min(new_timeo, t);
4735 			break;
4736 		}
4737 		WARN_ON(!unhash_delegation_locked(dp));
4738 		list_add(&dp->dl_recall_lru, &reaplist);
4739 	}
4740 	spin_unlock(&state_lock);
4741 	while (!list_empty(&reaplist)) {
4742 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
4743 					dl_recall_lru);
4744 		list_del_init(&dp->dl_recall_lru);
4745 		revoke_delegation(dp);
4746 	}
4747 
4748 	spin_lock(&nn->client_lock);
4749 	while (!list_empty(&nn->close_lru)) {
4750 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4751 					oo_close_lru);
4752 		if (time_after((unsigned long)oo->oo_time,
4753 			       (unsigned long)cutoff)) {
4754 			t = oo->oo_time - cutoff;
4755 			new_timeo = min(new_timeo, t);
4756 			break;
4757 		}
4758 		list_del_init(&oo->oo_close_lru);
4759 		stp = oo->oo_last_closed_stid;
4760 		oo->oo_last_closed_stid = NULL;
4761 		spin_unlock(&nn->client_lock);
4762 		nfs4_put_stid(&stp->st_stid);
4763 		spin_lock(&nn->client_lock);
4764 	}
4765 	spin_unlock(&nn->client_lock);
4766 
4767 	/*
4768 	 * It's possible for a client to try and acquire an already held lock
4769 	 * that is being held for a long time, and then lose interest in it.
4770 	 * So, we clean out any un-revisited request after a lease period
4771 	 * under the assumption that the client is no longer interested.
4772 	 *
4773 	 * RFC5661, sec. 9.6 states that the client must not rely on getting
4774 	 * notifications and must continue to poll for locks, even when the
4775 	 * server supports them. Thus this shouldn't lead to clients blocking
4776 	 * indefinitely once the lock does become free.
4777 	 */
4778 	BUG_ON(!list_empty(&reaplist));
4779 	spin_lock(&nn->blocked_locks_lock);
4780 	while (!list_empty(&nn->blocked_locks_lru)) {
4781 		nbl = list_first_entry(&nn->blocked_locks_lru,
4782 					struct nfsd4_blocked_lock, nbl_lru);
4783 		if (time_after((unsigned long)nbl->nbl_time,
4784 			       (unsigned long)cutoff)) {
4785 			t = nbl->nbl_time - cutoff;
4786 			new_timeo = min(new_timeo, t);
4787 			break;
4788 		}
4789 		list_move(&nbl->nbl_lru, &reaplist);
4790 		list_del_init(&nbl->nbl_list);
4791 	}
4792 	spin_unlock(&nn->blocked_locks_lock);
4793 
4794 	while (!list_empty(&reaplist)) {
4795 		nbl = list_first_entry(&reaplist,
4796 					struct nfsd4_blocked_lock, nbl_lru);
4797 		list_del_init(&nbl->nbl_lru);
4798 		posix_unblock_lock(&nbl->nbl_lock);
4799 		free_blocked_lock(nbl);
4800 	}
4801 
4802 	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4803 	return new_timeo;
4804 }
4805 
4806 static struct workqueue_struct *laundry_wq;
4807 static void laundromat_main(struct work_struct *);
4808 
4809 static void
4810 laundromat_main(struct work_struct *laundry)
4811 {
4812 	time_t t;
4813 	struct delayed_work *dwork = to_delayed_work(laundry);
4814 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4815 					   laundromat_work);
4816 
4817 	t = nfs4_laundromat(nn);
4818 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4819 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4820 }
4821 
4822 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4823 {
4824 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4825 		return nfserr_bad_stateid;
4826 	return nfs_ok;
4827 }
4828 
4829 static inline int
4830 access_permit_read(struct nfs4_ol_stateid *stp)
4831 {
4832 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4833 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4834 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4835 }
4836 
4837 static inline int
4838 access_permit_write(struct nfs4_ol_stateid *stp)
4839 {
4840 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4841 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4842 }
4843 
4844 static
4845 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4846 {
4847 	__be32 status = nfserr_openmode;
4848 
4849 	/* For lock stateids, we test the parent open, not the lock: */
4850 	if (stp->st_openstp)
4851 		stp = stp->st_openstp;
4852 	if ((flags & WR_STATE) && !access_permit_write(stp))
4853 		goto out;
4854 	if ((flags & RD_STATE) && !access_permit_read(stp))
4855 		goto out;
4856 	status = nfs_ok;
4857 out:
4858 	return status;
4859 }
4860 
4861 static inline __be32
4862 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4863 {
4864 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
4865 		return nfs_ok;
4866 	else if (opens_in_grace(net)) {
4867 		/* Answer in remaining cases depends on existence of
4868 		 * conflicting state, so we must wait out the grace period. */
4869 		return nfserr_grace;
4870 	} else if (flags & WR_STATE)
4871 		return nfs4_share_conflict(current_fh,
4872 				NFS4_SHARE_DENY_WRITE);
4873 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4874 		return nfs4_share_conflict(current_fh,
4875 				NFS4_SHARE_DENY_READ);
4876 }
4877 
4878 /*
4879  * Allow READ/WRITE during grace period on recovered state only for files
4880  * that are not able to provide mandatory locking.
4881  */
4882 static inline int
4883 grace_disallows_io(struct net *net, struct inode *inode)
4884 {
4885 	return opens_in_grace(net) && mandatory_lock(inode);
4886 }
4887 
4888 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4889 {
4890 	/*
4891 	 * When sessions are used the stateid generation number is ignored
4892 	 * when it is zero.
4893 	 */
4894 	if (has_session && in->si_generation == 0)
4895 		return nfs_ok;
4896 
4897 	if (in->si_generation == ref->si_generation)
4898 		return nfs_ok;
4899 
4900 	/* If the client sends us a stateid from the future, it's buggy: */
4901 	if (nfsd4_stateid_generation_after(in, ref))
4902 		return nfserr_bad_stateid;
4903 	/*
4904 	 * However, we could see a stateid from the past, even from a
4905 	 * non-buggy client.  For example, if the client sends a lock
4906 	 * while some IO is outstanding, the lock may bump si_generation
4907 	 * while the IO is still in flight.  The client could avoid that
4908 	 * situation by waiting for responses on all the IO requests,
4909 	 * but better performance may result in retrying IO that
4910 	 * receives an old_stateid error if requests are rarely
4911 	 * reordered in flight:
4912 	 */
4913 	return nfserr_old_stateid;
4914 }
4915 
4916 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
4917 {
4918 	__be32 ret;
4919 
4920 	spin_lock(&s->sc_lock);
4921 	ret = nfsd4_verify_open_stid(s);
4922 	if (ret == nfs_ok)
4923 		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
4924 	spin_unlock(&s->sc_lock);
4925 	return ret;
4926 }
4927 
4928 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4929 {
4930 	if (ols->st_stateowner->so_is_open_owner &&
4931 	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4932 		return nfserr_bad_stateid;
4933 	return nfs_ok;
4934 }
4935 
4936 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4937 {
4938 	struct nfs4_stid *s;
4939 	__be32 status = nfserr_bad_stateid;
4940 
4941 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4942 		CLOSE_STATEID(stateid))
4943 		return status;
4944 	/* Client debugging aid. */
4945 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4946 		char addr_str[INET6_ADDRSTRLEN];
4947 		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4948 				 sizeof(addr_str));
4949 		pr_warn_ratelimited("NFSD: client %s testing state ID "
4950 					"with incorrect client ID\n", addr_str);
4951 		return status;
4952 	}
4953 	spin_lock(&cl->cl_lock);
4954 	s = find_stateid_locked(cl, stateid);
4955 	if (!s)
4956 		goto out_unlock;
4957 	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
4958 	if (status)
4959 		goto out_unlock;
4960 	switch (s->sc_type) {
4961 	case NFS4_DELEG_STID:
4962 		status = nfs_ok;
4963 		break;
4964 	case NFS4_REVOKED_DELEG_STID:
4965 		status = nfserr_deleg_revoked;
4966 		break;
4967 	case NFS4_OPEN_STID:
4968 	case NFS4_LOCK_STID:
4969 		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4970 		break;
4971 	default:
4972 		printk("unknown stateid type %x\n", s->sc_type);
4973 		/* Fallthrough */
4974 	case NFS4_CLOSED_STID:
4975 	case NFS4_CLOSED_DELEG_STID:
4976 		status = nfserr_bad_stateid;
4977 	}
4978 out_unlock:
4979 	spin_unlock(&cl->cl_lock);
4980 	return status;
4981 }
4982 
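/*
 * Look up a stateid of one of the types in typemask for the calling client,
 * returning a referenced nfs4_stid in *s.  The special all-zero/all-one and
 * close stateids are rejected, and revoked delegations are only returned if
 * the caller explicitly asked for NFS4_REVOKED_DELEG_STID.
 */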
4983 __be32
4984 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4985 		     stateid_t *stateid, unsigned char typemask,
4986 		     struct nfs4_stid **s, struct nfsd_net *nn)
4987 {
4988 	__be32 status;
4989 	bool return_revoked = false;
4990 
4991 	/*
4992 	 * Only return revoked delegations if explicitly asked;
4993 	 * otherwise we report revoked or bad_stateid status.
4994 	 */
4995 	if (typemask & NFS4_REVOKED_DELEG_STID)
4996 		return_revoked = true;
4997 	else if (typemask & NFS4_DELEG_STID)
4998 		typemask |= NFS4_REVOKED_DELEG_STID;
4999 
5000 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5001 		CLOSE_STATEID(stateid))
5002 		return nfserr_bad_stateid;
5003 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5004 	if (status == nfserr_stale_clientid) {
5005 		if (cstate->session)
5006 			return nfserr_bad_stateid;
5007 		return nfserr_stale_stateid;
5008 	}
5009 	if (status)
5010 		return status;
5011 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
5012 	if (!*s)
5013 		return nfserr_bad_stateid;
5014 	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5015 		nfs4_put_stid(*s);
5016 		if (cstate->minorversion)
5017 			return nfserr_deleg_revoked;
5018 		return nfserr_bad_stateid;
5019 	}
5020 	return nfs_ok;
5021 }
5022 
5023 static struct file *
5024 nfs4_find_file(struct nfs4_stid *s, int flags)
5025 {
5026 	if (!s)
5027 		return NULL;
5028 
5029 	switch (s->sc_type) {
5030 	case NFS4_DELEG_STID:
5031 		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5032 			return NULL;
5033 		return get_file(s->sc_file->fi_deleg_file);
5034 	case NFS4_OPEN_STID:
5035 	case NFS4_LOCK_STID:
5036 		if (flags & RD_STATE)
5037 			return find_readable_file(s->sc_file);
5038 		else
5039 			return find_writeable_file(s->sc_file);
5040 		break;
5041 	}
5042 
5043 	return NULL;
5044 }
5045 
5046 static __be32
5047 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5048 {
5049 	__be32 status;
5050 
5051 	status = nfsd4_check_openowner_confirmed(ols);
5052 	if (status)
5053 		return status;
5054 	return nfs4_check_openmode(ols, flags);
5055 }
5056 
5057 static __be32
5058 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5059 		struct file **filpp, bool *tmp_file, int flags)
5060 {
5061 	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5062 	struct file *file;
5063 	__be32 status;
5064 
5065 	file = nfs4_find_file(s, flags);
5066 	if (file) {
5067 		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5068 				acc | NFSD_MAY_OWNER_OVERRIDE);
5069 		if (status) {
5070 			fput(file);
5071 			return status;
5072 		}
5073 
5074 		*filpp = file;
5075 	} else {
5076 		status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5077 		if (status)
5078 			return status;
5079 
5080 		if (tmp_file)
5081 			*tmp_file = true;
5082 	}
5083 
5084 	return 0;
5085 }
5086 
5087 /*
5088  * Checks for stateid operations
5089  */
5090 __be32
5091 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5092 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5093 		stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5094 {
5095 	struct inode *ino = d_inode(fhp->fh_dentry);
5096 	struct net *net = SVC_NET(rqstp);
5097 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5098 	struct nfs4_stid *s = NULL;
5099 	__be32 status;
5100 
5101 	if (filpp)
5102 		*filpp = NULL;
5103 	if (tmp_file)
5104 		*tmp_file = false;
5105 
5106 	if (grace_disallows_io(net, ino))
5107 		return nfserr_grace;
5108 
5109 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5110 		status = check_special_stateids(net, fhp, stateid, flags);
5111 		goto done;
5112 	}
5113 
5114 	status = nfsd4_lookup_stateid(cstate, stateid,
5115 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5116 				&s, nn);
5117 	if (status)
5118 		return status;
5119 	status = nfsd4_stid_check_stateid_generation(stateid, s,
5120 			nfsd4_has_session(cstate));
5121 	if (status)
5122 		goto out;
5123 
5124 	switch (s->sc_type) {
5125 	case NFS4_DELEG_STID:
5126 		status = nfs4_check_delegmode(delegstateid(s), flags);
5127 		break;
5128 	case NFS4_OPEN_STID:
5129 	case NFS4_LOCK_STID:
5130 		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5131 		break;
5132 	default:
5133 		status = nfserr_bad_stateid;
5134 		break;
5135 	}
5136 	if (status)
5137 		goto out;
5138 	status = nfs4_check_fh(fhp, s);
5139 
5140 done:
5141 	if (!status && filpp)
5142 		status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5143 out:
5144 	if (s)
5145 		nfs4_put_stid(s);
5146 	return status;
5147 }
5148 
5149 /*
5150  * Test if the stateid is valid
5151  */
5152 __be32
5153 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5154 		   union nfsd4_op_u *u)
5155 {
5156 	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5157 	struct nfsd4_test_stateid_id *stateid;
5158 	struct nfs4_client *cl = cstate->session->se_client;
5159 
5160 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5161 		stateid->ts_id_status =
5162 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5163 
5164 	return nfs_ok;
5165 }
5166 
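/*
 * FREE_STATEID handling for a lock stateid: take st_mutex, check the stateid
 * generation, and refuse with nfserr_locks_held while the lockowner still
 * holds locks on the file; otherwise unhash the stateid.  Consumes the
 * caller's reference on the stid.
 */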
5167 static __be32
5168 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5169 {
5170 	struct nfs4_ol_stateid *stp = openlockstateid(s);
5171 	__be32 ret;
5172 
5173 	ret = nfsd4_lock_ol_stateid(stp);
5174 	if (ret)
5175 		goto out_put_stid;
5176 
5177 	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5178 	if (ret)
5179 		goto out;
5180 
5181 	ret = nfserr_locks_held;
5182 	if (check_for_locks(stp->st_stid.sc_file,
5183 			    lockowner(stp->st_stateowner)))
5184 		goto out;
5185 
5186 	release_lock_stateid(stp);
5187 	ret = nfs_ok;
5188 
5189 out:
5190 	mutex_unlock(&stp->st_mutex);
5191 out_put_stid:
5192 	nfs4_put_stid(s);
5193 	return ret;
5194 }
5195 
5196 __be32
5197 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5198 		   union nfsd4_op_u *u)
5199 {
5200 	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5201 	stateid_t *stateid = &free_stateid->fr_stateid;
5202 	struct nfs4_stid *s;
5203 	struct nfs4_delegation *dp;
5204 	struct nfs4_client *cl = cstate->session->se_client;
5205 	__be32 ret = nfserr_bad_stateid;
5206 
5207 	spin_lock(&cl->cl_lock);
5208 	s = find_stateid_locked(cl, stateid);
5209 	if (!s)
5210 		goto out_unlock;
5211 	spin_lock(&s->sc_lock);
5212 	switch (s->sc_type) {
5213 	case NFS4_DELEG_STID:
5214 		ret = nfserr_locks_held;
5215 		break;
5216 	case NFS4_OPEN_STID:
5217 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5218 		if (ret)
5219 			break;
5220 		ret = nfserr_locks_held;
5221 		break;
5222 	case NFS4_LOCK_STID:
5223 		spin_unlock(&s->sc_lock);
5224 		refcount_inc(&s->sc_count);
5225 		spin_unlock(&cl->cl_lock);
5226 		ret = nfsd4_free_lock_stateid(stateid, s);
5227 		goto out;
5228 	case NFS4_REVOKED_DELEG_STID:
5229 		spin_unlock(&s->sc_lock);
5230 		dp = delegstateid(s);
5231 		list_del_init(&dp->dl_recall_lru);
5232 		spin_unlock(&cl->cl_lock);
5233 		nfs4_put_stid(s);
5234 		ret = nfs_ok;
5235 		goto out;
5236 	/* Default falls through and returns nfserr_bad_stateid */
5237 	}
5238 	spin_unlock(&s->sc_lock);
5239 out_unlock:
5240 	spin_unlock(&cl->cl_lock);
5241 out:
5242 	return ret;
5243 }
5244 
5245 static inline int
5246 setlkflg (int type)
5247 setlkflg(int type)
5248 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5249 		RD_STATE : WR_STATE;
5250 }
5251 
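/*
 * Common checks for seqid-mutating operations: verify the owner's seqid,
 * take st_mutex, and check the stateid generation and filehandle.  st_mutex
 * is held on return only when all of the checks succeed.
 */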
5252 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5253 {
5254 	struct svc_fh *current_fh = &cstate->current_fh;
5255 	struct nfs4_stateowner *sop = stp->st_stateowner;
5256 	__be32 status;
5257 
5258 	status = nfsd4_check_seqid(cstate, sop, seqid);
5259 	if (status)
5260 		return status;
5261 	status = nfsd4_lock_ol_stateid(stp);
5262 	if (status != nfs_ok)
5263 		return status;
5264 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5265 	if (status == nfs_ok)
5266 		status = nfs4_check_fh(current_fh, &stp->st_stid);
5267 	if (status != nfs_ok)
5268 		mutex_unlock(&stp->st_mutex);
5269 	return status;
5270 }
5271 
5272 /*
5273  * Checks for sequence id mutating operations.
5274  */
5275 static __be32
5276 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5277 			 stateid_t *stateid, char typemask,
5278 			 struct nfs4_ol_stateid **stpp,
5279 			 struct nfsd_net *nn)
5280 {
5281 	__be32 status;
5282 	struct nfs4_stid *s;
5283 	struct nfs4_ol_stateid *stp = NULL;
5284 
5285 	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5286 		seqid, STATEID_VAL(stateid));
5287 
5288 	*stpp = NULL;
5289 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5290 	if (status)
5291 		return status;
5292 	stp = openlockstateid(s);
5293 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5294 
5295 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5296 	if (!status)
5297 		*stpp = stp;
5298 	else
5299 		nfs4_put_stid(&stp->st_stid);
5300 	return status;
5301 }
5302 
5303 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5304 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5305 {
5306 	__be32 status;
5307 	struct nfs4_openowner *oo;
5308 	struct nfs4_ol_stateid *stp;
5309 
5310 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5311 						NFS4_OPEN_STID, &stp, nn);
5312 	if (status)
5313 		return status;
5314 	oo = openowner(stp->st_stateowner);
5315 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5316 		mutex_unlock(&stp->st_mutex);
5317 		nfs4_put_stid(&stp->st_stid);
5318 		return nfserr_bad_stateid;
5319 	}
5320 	*stpp = stp;
5321 	return nfs_ok;
5322 }
5323 
5324 __be32
5325 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5326 		   union nfsd4_op_u *u)
5327 {
5328 	struct nfsd4_open_confirm *oc = &u->open_confirm;
5329 	__be32 status;
5330 	struct nfs4_openowner *oo;
5331 	struct nfs4_ol_stateid *stp;
5332 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5333 
5334 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5335 			cstate->current_fh.fh_dentry);
5336 
5337 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5338 	if (status)
5339 		return status;
5340 
5341 	status = nfs4_preprocess_seqid_op(cstate,
5342 					oc->oc_seqid, &oc->oc_req_stateid,
5343 					NFS4_OPEN_STID, &stp, nn);
5344 	if (status)
5345 		goto out;
5346 	oo = openowner(stp->st_stateowner);
5347 	status = nfserr_bad_stateid;
5348 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5349 		mutex_unlock(&stp->st_mutex);
5350 		goto put_stateid;
5351 	}
5352 	oo->oo_flags |= NFS4_OO_CONFIRMED;
5353 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5354 	mutex_unlock(&stp->st_mutex);
5355 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5356 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5357 
5358 	nfsd4_client_record_create(oo->oo_owner.so_client);
5359 	status = nfs_ok;
5360 put_stateid:
5361 	nfs4_put_stid(&stp->st_stid);
5362 out:
5363 	nfsd4_bump_seqid(cstate, status);
5364 	return status;
5365 }
5366 
5367 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5368 {
5369 	if (!test_access(access, stp))
5370 		return;
5371 	nfs4_file_put_access(stp->st_stid.sc_file, access);
5372 	clear_access(access, stp);
5373 }
5374 
5375 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5376 {
5377 	switch (to_access) {
5378 	case NFS4_SHARE_ACCESS_READ:
5379 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5380 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5381 		break;
5382 	case NFS4_SHARE_ACCESS_WRITE:
5383 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5384 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5385 		break;
5386 	case NFS4_SHARE_ACCESS_BOTH:
5387 		break;
5388 	default:
5389 		WARN_ON_ONCE(1);
5390 	}
5391 }
5392 
5393 __be32
5394 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5395 		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5396 {
5397 	struct nfsd4_open_downgrade *od = &u->open_downgrade;
5398 	__be32 status;
5399 	struct nfs4_ol_stateid *stp;
5400 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5401 
5402 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5403 			cstate->current_fh.fh_dentry);
5404 
5405 	/* We don't yet support WANT bits: */
5406 	if (od->od_deleg_want)
5407 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5408 			od->od_deleg_want);
5409 
5410 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5411 					&od->od_stateid, &stp, nn);
5412 	if (status)
5413 		goto out;
5414 	status = nfserr_inval;
5415 	if (!test_access(od->od_share_access, stp)) {
5416 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5417 			stp->st_access_bmap, od->od_share_access);
5418 		goto put_stateid;
5419 	}
5420 	if (!test_deny(od->od_share_deny, stp)) {
5421 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5422 			stp->st_deny_bmap, od->od_share_deny);
5423 		goto put_stateid;
5424 	}
5425 	nfs4_stateid_downgrade(stp, od->od_share_access);
5426 	reset_union_bmap_deny(od->od_share_deny, stp);
5427 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5428 	status = nfs_ok;
5429 put_stateid:
5430 	mutex_unlock(&stp->st_mutex);
5431 	nfs4_put_stid(&stp->st_stid);
5432 out:
5433 	nfsd4_bump_seqid(cstate, status);
5434 	return status;
5435 }
5436 
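/*
 * Unhash a just-closed open stateid.  For v4.1+ clients it can be freed
 * right away; for v4.0 it is parked on the openowner's close LRU so a
 * replayed CLOSE can still be handled.
 */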
5437 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5438 {
5439 	struct nfs4_client *clp = s->st_stid.sc_client;
5440 	bool unhashed;
5441 	LIST_HEAD(reaplist);
5442 
5443 	spin_lock(&clp->cl_lock);
5444 	unhashed = unhash_open_stateid(s, &reaplist);
5445 
5446 	if (clp->cl_minorversion) {
5447 		if (unhashed)
5448 			put_ol_stateid_locked(s, &reaplist);
5449 		spin_unlock(&clp->cl_lock);
5450 		free_ol_stateid_reaplist(&reaplist);
5451 	} else {
5452 		spin_unlock(&clp->cl_lock);
5453 		free_ol_stateid_reaplist(&reaplist);
5454 		if (unhashed)
5455 			move_to_close_lru(s, clp->net);
5456 	}
5457 }
5458 
5459 /*
5460  * nfs4_unlock_state() called after encode
5461  */
5462 __be32
5463 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5464 		union nfsd4_op_u *u)
5465 {
5466 	struct nfsd4_close *close = &u->close;
5467 	__be32 status;
5468 	struct nfs4_ol_stateid *stp;
5469 	struct net *net = SVC_NET(rqstp);
5470 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5471 
5472 	dprintk("NFSD: nfsd4_close on file %pd\n",
5473 			cstate->current_fh.fh_dentry);
5474 
5475 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5476 					&close->cl_stateid,
5477 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
5478 					&stp, nn);
5479 	nfsd4_bump_seqid(cstate, status);
5480 	if (status)
5481 		goto out;
5482 
5483 	stp->st_stid.sc_type = NFS4_CLOSED_STID;
5484 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5485 
5486 	nfsd4_close_open_stateid(stp);
5487 	mutex_unlock(&stp->st_mutex);
5488 
5489 	/* See RFC5661 section 18.2.4 */
5490 	if (stp->st_stid.sc_client->cl_minorversion)
5491 		memcpy(&close->cl_stateid, &close_stateid,
5492 				sizeof(close->cl_stateid));
5493 
5494 	/* put reference from nfs4_preprocess_seqid_op */
5495 	nfs4_put_stid(&stp->st_stid);
5496 out:
5497 	return status;
5498 }
5499 
5500 __be32
5501 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5502 		  union nfsd4_op_u *u)
5503 {
5504 	struct nfsd4_delegreturn *dr = &u->delegreturn;
5505 	struct nfs4_delegation *dp;
5506 	stateid_t *stateid = &dr->dr_stateid;
5507 	struct nfs4_stid *s;
5508 	__be32 status;
5509 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5510 
5511 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5512 		return status;
5513 
5514 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5515 	if (status)
5516 		goto out;
5517 	dp = delegstateid(s);
5518 	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
5519 	if (status)
5520 		goto put_stateid;
5521 
5522 	destroy_delegation(dp);
5523 put_stateid:
5524 	nfs4_put_stid(&dp->dl_stid);
5525 out:
5526 	return status;
5527 }
5528 
5529 static inline u64
5530 end_offset(u64 start, u64 len)
5531 {
5532 	u64 end;
5533 
5534 	end = start + len;
5535 	return end >= start ? end : NFS4_MAX_UINT64;
5536 }
5537 
5538 /* last octet in a range */
5539 static inline u64
5540 last_byte_offset(u64 start, u64 len)
5541 {
5542 	u64 end;
5543 
5544 	WARN_ON_ONCE(!len);
5545 	end = start + len;
5546 	return end > start ? end - 1 : NFS4_MAX_UINT64;
5547 }
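
/*
 * Worked example (illustrative): for start = 100 and len = 10,
 * end_offset() returns 110 (one past the last byte) and
 * last_byte_offset() returns 109.  When start + len would wrap past
 * 2^64 - 1 (for instance the protocol's "lock to EOF" length of
 * NFS4_MAX_UINT64 with a non-zero start), both helpers saturate to
 * NFS4_MAX_UINT64 rather than returning a wrapped value.
 */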
5548 
5549 /*
5550  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5551  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5552  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
5553  * locking, this prevents us from being completely protocol-compliant.  The
5554  * real solution to this problem is to start using unsigned file offsets in
5555  * the VFS, but this is a very deep change!
5556  */
5557 static inline void
5558 nfs4_transform_lock_offset(struct file_lock *lock)
5559 {
5560 	if (lock->fl_start < 0)
5561 		lock->fl_start = OFFSET_MAX;
5562 	if (lock->fl_end < 0)
5563 		lock->fl_end = OFFSET_MAX;
5564 }
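
/*
 * Illustrative example of the limitation noted above: a LOCK request at
 * offset 2^63, once stored in the signed loff_t fl_start, shows up here
 * as a negative value and is clamped to OFFSET_MAX (2^63 - 1), so the
 * server cannot place the lock on the exact range the client asked for.
 */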
5565 
5566 static fl_owner_t
5567 nfsd4_fl_get_owner(fl_owner_t owner)
5568 {
5569 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5570 
5571 	nfs4_get_stateowner(&lo->lo_owner);
5572 	return owner;
5573 }
5574 
5575 static void
5576 nfsd4_fl_put_owner(fl_owner_t owner)
5577 {
5578 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5579 
5580 	if (lo)
5581 		nfs4_put_stateowner(&lo->lo_owner);
5582 }
5583 
5584 static void
5585 nfsd4_lm_notify(struct file_lock *fl)
5586 {
5587 	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
5588 	struct net			*net = lo->lo_owner.so_client->net;
5589 	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
5590 	struct nfsd4_blocked_lock	*nbl = container_of(fl,
5591 						struct nfsd4_blocked_lock, nbl_lock);
5592 	bool queue = false;
5593 
5594 	/* An empty list means that something else is going to be using it */
5595 	spin_lock(&nn->blocked_locks_lock);
5596 	if (!list_empty(&nbl->nbl_list)) {
5597 		list_del_init(&nbl->nbl_list);
5598 		list_del_init(&nbl->nbl_lru);
5599 		queue = true;
5600 	}
5601 	spin_unlock(&nn->blocked_locks_lock);
5602 
5603 	if (queue)
5604 		nfsd4_run_cb(&nbl->nbl_cb);
5605 }
5606 
5607 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
5608 	.lm_notify = nfsd4_lm_notify,
5609 	.lm_get_owner = nfsd4_fl_get_owner,
5610 	.lm_put_owner = nfsd4_fl_put_owner,
5611 };
5612 
5613 static inline void
5614 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5615 {
5616 	struct nfs4_lockowner *lo;
5617 
5618 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5619 		lo = (struct nfs4_lockowner *) fl->fl_owner;
5620 		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5621 					lo->lo_owner.so_owner.len, GFP_KERNEL);
5622 		if (!deny->ld_owner.data)
5623 			/* We just don't care that much */
5624 			goto nevermind;
5625 		deny->ld_owner.len = lo->lo_owner.so_owner.len;
5626 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5627 	} else {
5628 nevermind:
5629 		deny->ld_owner.len = 0;
5630 		deny->ld_owner.data = NULL;
5631 		deny->ld_clientid.cl_boot = 0;
5632 		deny->ld_clientid.cl_id = 0;
5633 	}
5634 	deny->ld_start = fl->fl_start;
5635 	deny->ld_length = NFS4_MAX_UINT64;
5636 	if (fl->fl_end != NFS4_MAX_UINT64)
5637 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
5638 	deny->ld_type = NFS4_READ_LT;
5639 	if (fl->fl_type != F_RDLCK)
5640 		deny->ld_type = NFS4_WRITE_LT;
5641 }
5642 
5643 static struct nfs4_lockowner *
5644 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5645 {
5646 	unsigned int strhashval = ownerstr_hashval(owner);
5647 	struct nfs4_stateowner *so;
5648 
5649 	lockdep_assert_held(&clp->cl_lock);
5650 
5651 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5652 			    so_strhash) {
5653 		if (so->so_is_open_owner)
5654 			continue;
5655 		if (same_owner_str(so, owner))
5656 			return lockowner(nfs4_get_stateowner(so));
5657 	}
5658 	return NULL;
5659 }
5660 
5661 static struct nfs4_lockowner *
5662 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5663 {
5664 	struct nfs4_lockowner *lo;
5665 
5666 	spin_lock(&clp->cl_lock);
5667 	lo = find_lockowner_str_locked(clp, owner);
5668 	spin_unlock(&clp->cl_lock);
5669 	return lo;
5670 }
5671 
5672 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5673 {
5674 	unhash_lockowner_locked(lockowner(sop));
5675 }
5676 
5677 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5678 {
5679 	struct nfs4_lockowner *lo = lockowner(sop);
5680 
5681 	kmem_cache_free(lockowner_slab, lo);
5682 }
5683 
5684 static const struct nfs4_stateowner_operations lockowner_ops = {
5685 	.so_unhash =	nfs4_unhash_lockowner,
5686 	.so_free =	nfs4_free_lockowner,
5687 };
5688 
5689 /*
5690  * Alloc a lock owner structure.
5691  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
5692  * occurred.
5693  *
5694  * strhashval = ownerstr_hashval
5695  */
5696 static struct nfs4_lockowner *
5697 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5698 			   struct nfs4_ol_stateid *open_stp,
5699 			   struct nfsd4_lock *lock)
5700 {
5701 	struct nfs4_lockowner *lo, *ret;
5702 
5703 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5704 	if (!lo)
5705 		return NULL;
5706 	INIT_LIST_HEAD(&lo->lo_blocked);
5707 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5708 	lo->lo_owner.so_is_open_owner = 0;
5709 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5710 	lo->lo_owner.so_ops = &lockowner_ops;
5711 	spin_lock(&clp->cl_lock);
5712 	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5713 	if (ret == NULL) {
5714 		list_add(&lo->lo_owner.so_strhash,
5715 			 &clp->cl_ownerstr_hashtbl[strhashval]);
5716 		ret = lo;
5717 	} else
5718 		nfs4_free_stateowner(&lo->lo_owner);
5719 
5720 	spin_unlock(&clp->cl_lock);
5721 	return ret;
5722 }
5723 
5724 static struct nfs4_ol_stateid *
5725 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5726 {
5727 	struct nfs4_ol_stateid *lst;
5728 	struct nfs4_client *clp = lo->lo_owner.so_client;
5729 
5730 	lockdep_assert_held(&clp->cl_lock);
5731 
5732 	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5733 		if (lst->st_stid.sc_type != NFS4_LOCK_STID)
5734 			continue;
5735 		if (lst->st_stid.sc_file == fp) {
5736 			refcount_inc(&lst->st_stid.sc_count);
5737 			return lst;
5738 		}
5739 	}
5740 	return NULL;
5741 }
5742 
5743 static struct nfs4_ol_stateid *
5744 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5745 		  struct nfs4_file *fp, struct inode *inode,
5746 		  struct nfs4_ol_stateid *open_stp)
5747 {
5748 	struct nfs4_client *clp = lo->lo_owner.so_client;
5749 	struct nfs4_ol_stateid *retstp;
5750 
5751 	mutex_init(&stp->st_mutex);
5752 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
5753 retry:
5754 	spin_lock(&clp->cl_lock);
5755 	spin_lock(&fp->fi_lock);
5756 	retstp = find_lock_stateid(lo, fp);
5757 	if (retstp)
5758 		goto out_unlock;
5759 
5760 	refcount_inc(&stp->st_stid.sc_count);
5761 	stp->st_stid.sc_type = NFS4_LOCK_STID;
5762 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5763 	get_nfs4_file(fp);
5764 	stp->st_stid.sc_file = fp;
5765 	stp->st_access_bmap = 0;
5766 	stp->st_deny_bmap = open_stp->st_deny_bmap;
5767 	stp->st_openstp = open_stp;
5768 	list_add(&stp->st_locks, &open_stp->st_locks);
5769 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5770 	list_add(&stp->st_perfile, &fp->fi_stateids);
5771 out_unlock:
5772 	spin_unlock(&fp->fi_lock);
5773 	spin_unlock(&clp->cl_lock);
5774 	if (retstp) {
5775 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
5776 			nfs4_put_stid(&retstp->st_stid);
5777 			goto retry;
5778 		}
5779 		/* To keep mutex tracking happy */
5780 		mutex_unlock(&stp->st_mutex);
5781 		stp = retstp;
5782 	}
5783 	return stp;
5784 }
5785 
5786 static struct nfs4_ol_stateid *
5787 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5788 			    struct inode *inode, struct nfs4_ol_stateid *ost,
5789 			    bool *new)
5790 {
5791 	struct nfs4_stid *ns = NULL;
5792 	struct nfs4_ol_stateid *lst;
5793 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5794 	struct nfs4_client *clp = oo->oo_owner.so_client;
5795 
5796 	*new = false;
5797 	spin_lock(&clp->cl_lock);
5798 	lst = find_lock_stateid(lo, fi);
5799 	spin_unlock(&clp->cl_lock);
5800 	if (lst != NULL) {
5801 		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
5802 			goto out;
5803 		nfs4_put_stid(&lst->st_stid);
5804 	}
5805 	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5806 	if (ns == NULL)
5807 		return NULL;
5808 
5809 	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
5810 	if (lst == openlockstateid(ns))
5811 		*new = true;
5812 	else
5813 		nfs4_put_stid(ns);
5814 out:
5815 	return lst;
5816 }
5817 
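/*
 * Note on check_lock_length() below: since ~offset == NFS4_MAX_UINT64 -
 * offset, "length > ~offset" is true exactly when offset + length would
 * exceed NFS4_MAX_UINT64.  A zero length, or a range that runs past the
 * last representable byte, is rejected; the special "lock to end of
 * file" length NFS4_MAX_UINT64 is always allowed.
 */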
5818 static int
5819 check_lock_length(u64 offset, u64 length)
5820 {
5821 	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5822 		(length > ~offset)));
5823 }
5824 
5825 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5826 {
5827 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5828 
5829 	lockdep_assert_held(&fp->fi_lock);
5830 
5831 	if (test_access(access, lock_stp))
5832 		return;
5833 	__nfs4_file_get_access(fp, access);
5834 	set_access(access, lock_stp);
5835 }
5836 
5837 static __be32
5838 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5839 			    struct nfs4_ol_stateid *ost,
5840 			    struct nfsd4_lock *lock,
5841 			    struct nfs4_ol_stateid **plst, bool *new)
5842 {
5843 	__be32 status;
5844 	struct nfs4_file *fi = ost->st_stid.sc_file;
5845 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5846 	struct nfs4_client *cl = oo->oo_owner.so_client;
5847 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5848 	struct nfs4_lockowner *lo;
5849 	struct nfs4_ol_stateid *lst;
5850 	unsigned int strhashval;
5851 
5852 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
5853 	if (!lo) {
5854 		strhashval = ownerstr_hashval(&lock->lk_new_owner);
5855 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5856 		if (lo == NULL)
5857 			return nfserr_jukebox;
5858 	} else {
5859 		/* with an existing lockowner, seqids must be the same */
5860 		status = nfserr_bad_seqid;
5861 		if (!cstate->minorversion &&
5862 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5863 			goto out;
5864 	}
5865 
5866 	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5867 	if (lst == NULL) {
5868 		status = nfserr_jukebox;
5869 		goto out;
5870 	}
5871 
5872 	status = nfs_ok;
5873 	*plst = lst;
5874 out:
5875 	nfs4_put_stateowner(&lo->lo_owner);
5876 	return status;
5877 }
5878 
5879 /*
5880  *  LOCK operation
5881  */
5882 __be32
5883 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5884 	   union nfsd4_op_u *u)
5885 {
5886 	struct nfsd4_lock *lock = &u->lock;
5887 	struct nfs4_openowner *open_sop = NULL;
5888 	struct nfs4_lockowner *lock_sop = NULL;
5889 	struct nfs4_ol_stateid *lock_stp = NULL;
5890 	struct nfs4_ol_stateid *open_stp = NULL;
5891 	struct nfs4_file *fp;
5892 	struct file *filp = NULL;
5893 	struct nfsd4_blocked_lock *nbl = NULL;
5894 	struct file_lock *file_lock = NULL;
5895 	struct file_lock *conflock = NULL;
5896 	__be32 status = 0;
5897 	int lkflg;
5898 	int err;
5899 	bool new = false;
5900 	unsigned char fl_type;
5901 	unsigned int fl_flags = FL_POSIX;
5902 	struct net *net = SVC_NET(rqstp);
5903 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5904 
5905 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5906 		(long long) lock->lk_offset,
5907 		(long long) lock->lk_length);
5908 
5909 	if (check_lock_length(lock->lk_offset, lock->lk_length))
5910 		 return nfserr_inval;
5911 
5912 	if ((status = fh_verify(rqstp, &cstate->current_fh,
5913 				S_IFREG, NFSD_MAY_LOCK))) {
5914 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
5915 		return status;
5916 	}
5917 
5918 	if (lock->lk_is_new) {
5919 		if (nfsd4_has_session(cstate))
5920 			/* See RFC 5661 18.10.3: given clientid is ignored: */
5921 			memcpy(&lock->lk_new_clientid,
5922 				&cstate->session->se_client->cl_clientid,
5923 				sizeof(clientid_t));
5924 
5925 		status = nfserr_stale_clientid;
5926 		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5927 			goto out;
5928 
5929 		/* validate and update open stateid and open seqid */
5930 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
5931 				        lock->lk_new_open_seqid,
5932 		                        &lock->lk_new_open_stateid,
5933 					&open_stp, nn);
5934 		if (status)
5935 			goto out;
5936 		mutex_unlock(&open_stp->st_mutex);
5937 		open_sop = openowner(open_stp->st_stateowner);
5938 		status = nfserr_bad_stateid;
5939 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5940 						&lock->lk_new_clientid))
5941 			goto out;
5942 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
5943 							&lock_stp, &new);
5944 	} else {
5945 		status = nfs4_preprocess_seqid_op(cstate,
5946 				       lock->lk_old_lock_seqid,
5947 				       &lock->lk_old_lock_stateid,
5948 				       NFS4_LOCK_STID, &lock_stp, nn);
5949 	}
5950 	if (status)
5951 		goto out;
5952 	lock_sop = lockowner(lock_stp->st_stateowner);
5953 
5954 	lkflg = setlkflg(lock->lk_type);
5955 	status = nfs4_check_openmode(lock_stp, lkflg);
5956 	if (status)
5957 		goto out;
5958 
5959 	status = nfserr_grace;
5960 	if (locks_in_grace(net) && !lock->lk_reclaim)
5961 		goto out;
5962 	status = nfserr_no_grace;
5963 	if (!locks_in_grace(net) && lock->lk_reclaim)
5964 		goto out;
5965 
5966 	fp = lock_stp->st_stid.sc_file;
5967 	switch (lock->lk_type) {
5968 		case NFS4_READW_LT:
5969 			if (nfsd4_has_session(cstate))
5970 				fl_flags |= FL_SLEEP;
5971 			/* Fallthrough */
5972 		case NFS4_READ_LT:
5973 			spin_lock(&fp->fi_lock);
5974 			filp = find_readable_file_locked(fp);
5975 			if (filp)
5976 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5977 			spin_unlock(&fp->fi_lock);
5978 			fl_type = F_RDLCK;
5979 			break;
5980 		case NFS4_WRITEW_LT:
5981 			if (nfsd4_has_session(cstate))
5982 				fl_flags |= FL_SLEEP;
5983 			/* Fallthrough */
5984 		case NFS4_WRITE_LT:
5985 			spin_lock(&fp->fi_lock);
5986 			filp = find_writeable_file_locked(fp);
5987 			if (filp)
5988 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5989 			spin_unlock(&fp->fi_lock);
5990 			fl_type = F_WRLCK;
5991 			break;
5992 		default:
5993 			status = nfserr_inval;
5994 		goto out;
5995 	}
5996 
5997 	if (!filp) {
5998 		status = nfserr_openmode;
5999 		goto out;
6000 	}
6001 
6002 	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6003 	if (!nbl) {
6004 		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6005 		status = nfserr_jukebox;
6006 		goto out;
6007 	}
6008 
6009 	file_lock = &nbl->nbl_lock;
6010 	file_lock->fl_type = fl_type;
6011 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6012 	file_lock->fl_pid = current->tgid;
6013 	file_lock->fl_file = filp;
6014 	file_lock->fl_flags = fl_flags;
6015 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6016 	file_lock->fl_start = lock->lk_offset;
6017 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6018 	nfs4_transform_lock_offset(file_lock);
6019 
6020 	conflock = locks_alloc_lock();
6021 	if (!conflock) {
6022 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6023 		status = nfserr_jukebox;
6024 		goto out;
6025 	}
6026 
6027 	if (fl_flags & FL_SLEEP) {
6028 		nbl->nbl_time = jiffies;
6029 		spin_lock(&nn->blocked_locks_lock);
6030 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6031 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6032 		spin_unlock(&nn->blocked_locks_lock);
6033 	}
6034 
6035 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6036 	switch (err) {
6037 	case 0: /* success! */
6038 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6039 		status = 0;
6040 		break;
6041 	case FILE_LOCK_DEFERRED:
6042 		nbl = NULL;
6043 		/* Fallthrough */
6044 	case -EAGAIN:		/* conflock holds conflicting lock */
6045 		status = nfserr_denied;
6046 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6047 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
6048 		break;
6049 	case -EDEADLK:
6050 		status = nfserr_deadlock;
6051 		break;
6052 	default:
6053 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
6054 		status = nfserrno(err);
6055 		break;
6056 	}
6057 out:
6058 	if (nbl) {
6059 		/* dequeue it if we queued it before */
6060 		if (fl_flags & FL_SLEEP) {
6061 			spin_lock(&nn->blocked_locks_lock);
6062 			list_del_init(&nbl->nbl_list);
6063 			list_del_init(&nbl->nbl_lru);
6064 			spin_unlock(&nn->blocked_locks_lock);
6065 		}
6066 		free_blocked_lock(nbl);
6067 	}
6068 	if (filp)
6069 		fput(filp);
6070 	if (lock_stp) {
6071 		/* Bump seqid manually if the 4.0 replay owner is openowner */
6072 		if (cstate->replay_owner &&
6073 		    cstate->replay_owner != &lock_sop->lo_owner &&
6074 		    seqid_mutating_err(ntohl(status)))
6075 			lock_sop->lo_owner.so_seqid++;
6076 
6077 		/*
6078 		 * If this is a new, never-before-used stateid, and we are
6079 		 * returning an error, then just go ahead and release it.
6080 		 */
6081 		if (status && new)
6082 			release_lock_stateid(lock_stp);
6083 
6084 		mutex_unlock(&lock_stp->st_mutex);
6085 
6086 		nfs4_put_stid(&lock_stp->st_stid);
6087 	}
6088 	if (open_stp)
6089 		nfs4_put_stid(&open_stp->st_stid);
6090 	nfsd4_bump_seqid(cstate, status);
6091 	if (conflock)
6092 		locks_free_lock(conflock);
6093 	return status;
6094 }
6095 
6096 /*
6097  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6098  * so we do a temporary open here just to get an open file to pass to
6099  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
6100  * inode operation.)
6101  */
6102 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6103 {
6104 	struct file *file;
6105 	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6106 	if (!err) {
6107 		err = nfserrno(vfs_test_lock(file, lock));
6108 		fput(file);
6109 	}
6110 	return err;
6111 }
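
/*
 * Note: vfs_test_lock() reports a conflict by copying the conflicting
 * lock's description (type, range, owner) into *lock; if there is no
 * conflict it sets lock->fl_type to F_UNLCK.  nfsd4_lockt() below relies
 * on that to decide whether to return the request as denied along with
 * the conflicting lock's details.
 */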
6112 
6113 /*
6114  * LOCKT operation
6115  */
6116 __be32
6117 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6118 	    union nfsd4_op_u *u)
6119 {
6120 	struct nfsd4_lockt *lockt = &u->lockt;
6121 	struct file_lock *file_lock = NULL;
6122 	struct nfs4_lockowner *lo = NULL;
6123 	__be32 status;
6124 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6125 
6126 	if (locks_in_grace(SVC_NET(rqstp)))
6127 		return nfserr_grace;
6128 
6129 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6130 		 return nfserr_inval;
6131 
6132 	if (!nfsd4_has_session(cstate)) {
6133 		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6134 		if (status)
6135 			goto out;
6136 	}
6137 
6138 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6139 		goto out;
6140 
6141 	file_lock = locks_alloc_lock();
6142 	if (!file_lock) {
6143 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6144 		status = nfserr_jukebox;
6145 		goto out;
6146 	}
6147 
6148 	switch (lockt->lt_type) {
6149 		case NFS4_READ_LT:
6150 		case NFS4_READW_LT:
6151 			file_lock->fl_type = F_RDLCK;
6152 		break;
6153 		case NFS4_WRITE_LT:
6154 		case NFS4_WRITEW_LT:
6155 			file_lock->fl_type = F_WRLCK;
6156 		break;
6157 		default:
6158 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6159 			status = nfserr_inval;
6160 		goto out;
6161 	}
6162 
6163 	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6164 	if (lo)
6165 		file_lock->fl_owner = (fl_owner_t)lo;
6166 	file_lock->fl_pid = current->tgid;
6167 	file_lock->fl_flags = FL_POSIX;
6168 
6169 	file_lock->fl_start = lockt->lt_offset;
6170 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6171 
6172 	nfs4_transform_lock_offset(file_lock);
6173 
6174 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6175 	if (status)
6176 		goto out;
6177 
6178 	if (file_lock->fl_type != F_UNLCK) {
6179 		status = nfserr_denied;
6180 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6181 	}
6182 out:
6183 	if (lo)
6184 		nfs4_put_stateowner(&lo->lo_owner);
6185 	if (file_lock)
6186 		locks_free_lock(file_lock);
6187 	return status;
6188 }
6189 
6190 __be32
6191 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6192 	    union nfsd4_op_u *u)
6193 {
6194 	struct nfsd4_locku *locku = &u->locku;
6195 	struct nfs4_ol_stateid *stp;
6196 	struct file *filp = NULL;
6197 	struct file_lock *file_lock = NULL;
6198 	__be32 status;
6199 	int err;
6200 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6201 
6202 	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6203 		(long long) locku->lu_offset,
6204 		(long long) locku->lu_length);
6205 
6206 	if (check_lock_length(locku->lu_offset, locku->lu_length))
6207 		 return nfserr_inval;
6208 
6209 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6210 					&locku->lu_stateid, NFS4_LOCK_STID,
6211 					&stp, nn);
6212 	if (status)
6213 		goto out;
6214 	filp = find_any_file(stp->st_stid.sc_file);
6215 	if (!filp) {
6216 		status = nfserr_lock_range;
6217 		goto put_stateid;
6218 	}
6219 	file_lock = locks_alloc_lock();
6220 	if (!file_lock) {
6221 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6222 		status = nfserr_jukebox;
6223 		goto fput;
6224 	}
6225 
6226 	file_lock->fl_type = F_UNLCK;
6227 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6228 	file_lock->fl_pid = current->tgid;
6229 	file_lock->fl_file = filp;
6230 	file_lock->fl_flags = FL_POSIX;
6231 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6232 	file_lock->fl_start = locku->lu_offset;
6233 
6234 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
6235 						locku->lu_length);
6236 	nfs4_transform_lock_offset(file_lock);
6237 
6238 	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6239 	if (err) {
6240 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6241 		goto out_nfserr;
6242 	}
6243 	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6244 fput:
6245 	fput(filp);
6246 put_stateid:
6247 	mutex_unlock(&stp->st_mutex);
6248 	nfs4_put_stid(&stp->st_stid);
6249 out:
6250 	nfsd4_bump_seqid(cstate, status);
6251 	if (file_lock)
6252 		locks_free_lock(file_lock);
6253 	return status;
6254 
6255 out_nfserr:
6256 	status = nfserrno(err);
6257 	goto fput;
6258 }
6259 
6260 /*
6261  * returns
6262  * 	true:  locks held by lockowner
6263  * 	false: no locks held by lockowner
6264  */
6265 static bool
6266 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6267 {
6268 	struct file_lock *fl;
6269 	int status = false;
6270 	struct file *filp = find_any_file(fp);
6271 	struct inode *inode;
6272 	struct file_lock_context *flctx;
6273 
6274 	if (!filp) {
6275 		/* Any valid lock stateid should have some sort of access */
6276 		WARN_ON_ONCE(1);
6277 		return status;
6278 	}
6279 
6280 	inode = file_inode(filp);
6281 	flctx = inode->i_flctx;
6282 
6283 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6284 		spin_lock(&flctx->flc_lock);
6285 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6286 			if (fl->fl_owner == (fl_owner_t)lowner) {
6287 				status = true;
6288 				break;
6289 			}
6290 		}
6291 		spin_unlock(&flctx->flc_lock);
6292 	}
6293 	fput(filp);
6294 	return status;
6295 }
6296 
6297 __be32
6298 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6299 			struct nfsd4_compound_state *cstate,
6300 			union nfsd4_op_u *u)
6301 {
6302 	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6303 	clientid_t *clid = &rlockowner->rl_clientid;
6304 	struct nfs4_stateowner *sop;
6305 	struct nfs4_lockowner *lo = NULL;
6306 	struct nfs4_ol_stateid *stp;
6307 	struct xdr_netobj *owner = &rlockowner->rl_owner;
6308 	unsigned int hashval = ownerstr_hashval(owner);
6309 	__be32 status;
6310 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6311 	struct nfs4_client *clp;
6312 	LIST_HEAD(reaplist);
6313 
6314 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6315 		clid->cl_boot, clid->cl_id);
6316 
6317 	status = lookup_clientid(clid, cstate, nn);
6318 	if (status)
6319 		return status;
6320 
6321 	clp = cstate->clp;
6322 	/* Find the matching lock stateowner */
6323 	spin_lock(&clp->cl_lock);
6324 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6325 			    so_strhash) {
6326 
6327 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6328 			continue;
6329 
6330 		/* see if there are still any locks associated with it */
6331 		lo = lockowner(sop);
6332 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6333 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
6334 				status = nfserr_locks_held;
6335 				spin_unlock(&clp->cl_lock);
6336 				return status;
6337 			}
6338 		}
6339 
6340 		nfs4_get_stateowner(sop);
6341 		break;
6342 	}
6343 	if (!lo) {
6344 		spin_unlock(&clp->cl_lock);
6345 		return status;
6346 	}
6347 
6348 	unhash_lockowner_locked(lo);
6349 	while (!list_empty(&lo->lo_owner.so_stateids)) {
6350 		stp = list_first_entry(&lo->lo_owner.so_stateids,
6351 				       struct nfs4_ol_stateid,
6352 				       st_perstateowner);
6353 		WARN_ON(!unhash_lock_stateid(stp));
6354 		put_ol_stateid_locked(stp, &reaplist);
6355 	}
6356 	spin_unlock(&clp->cl_lock);
6357 	free_ol_stateid_reaplist(&reaplist);
6358 	nfs4_put_stateowner(&lo->lo_owner);
6359 
6360 	return status;
6361 }
6362 
6363 static inline struct nfs4_client_reclaim *
6364 alloc_reclaim(void)
6365 {
6366 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6367 }
6368 
6369 bool
6370 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6371 {
6372 	struct nfs4_client_reclaim *crp;
6373 
6374 	crp = nfsd4_find_reclaim_client(name, nn);
6375 	return (crp && crp->cr_clp);
6376 }
6377 
6378 /*
6379  * failure => all reboot-recovery (reclaim) bets are off; the client will get nfserr_no_grace...
6380  */
6381 struct nfs4_client_reclaim *
6382 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6383 {
6384 	unsigned int strhashval;
6385 	struct nfs4_client_reclaim *crp;
6386 
6387 	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6388 	crp = alloc_reclaim();
6389 	if (crp) {
6390 		strhashval = clientstr_hashval(name);
6391 		INIT_LIST_HEAD(&crp->cr_strhash);
6392 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6393 		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6394 		crp->cr_clp = NULL;
6395 		nn->reclaim_str_hashtbl_size++;
6396 	}
6397 	return crp;
6398 }
6399 
6400 void
6401 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6402 {
6403 	list_del(&crp->cr_strhash);
6404 	kfree(crp);
6405 	nn->reclaim_str_hashtbl_size--;
6406 }
6407 
6408 void
6409 nfs4_release_reclaim(struct nfsd_net *nn)
6410 {
6411 	struct nfs4_client_reclaim *crp = NULL;
6412 	int i;
6413 
6414 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6415 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6416 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6417 			                struct nfs4_client_reclaim, cr_strhash);
6418 			nfs4_remove_reclaim_record(crp, nn);
6419 		}
6420 	}
6421 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6422 }
6423 
6424 /*
6425  * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
6426 struct nfs4_client_reclaim *
6427 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6428 {
6429 	unsigned int strhashval;
6430 	struct nfs4_client_reclaim *crp = NULL;
6431 
6432 	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6433 
6434 	strhashval = clientstr_hashval(recdir);
6435 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6436 		if (same_name(crp->cr_recdir, recdir)) {
6437 			return crp;
6438 		}
6439 	}
6440 	return NULL;
6441 }
6442 
6443 /*
6444  * Called from OPEN. Look for clientid in reclaim list.
6445  */
6446 __be32
6447 nfs4_check_open_reclaim(clientid_t *clid,
6448 		struct nfsd4_compound_state *cstate,
6449 		struct nfsd_net *nn)
6450 {
6451 	__be32 status;
6452 
6453 	/* find clientid in conf_id_hashtbl */
6454 	status = lookup_clientid(clid, cstate, nn);
6455 	if (status)
6456 		return nfserr_reclaim_bad;
6457 
6458 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6459 		return nfserr_no_grace;
6460 
6461 	if (nfsd4_client_record_check(cstate->clp))
6462 		return nfserr_reclaim_bad;
6463 
6464 	return nfs_ok;
6465 }
6466 
6467 #ifdef CONFIG_NFSD_FAULT_INJECTION
6468 static inline void
6469 put_client(struct nfs4_client *clp)
6470 {
6471 	atomic_dec(&clp->cl_refcount);
6472 }
6473 
6474 static struct nfs4_client *
6475 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6476 {
6477 	struct nfs4_client *clp;
6478 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6479 					  nfsd_net_id);
6480 
6481 	if (!nfsd_netns_ready(nn))
6482 		return NULL;
6483 
6484 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6485 		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6486 			return clp;
6487 	}
6488 	return NULL;
6489 }
6490 
6491 u64
6492 nfsd_inject_print_clients(void)
6493 {
6494 	struct nfs4_client *clp;
6495 	u64 count = 0;
6496 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6497 					  nfsd_net_id);
6498 	char buf[INET6_ADDRSTRLEN];
6499 
6500 	if (!nfsd_netns_ready(nn))
6501 		return 0;
6502 
6503 	spin_lock(&nn->client_lock);
6504 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6505 		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6506 		pr_info("NFS Client: %s\n", buf);
6507 		++count;
6508 	}
6509 	spin_unlock(&nn->client_lock);
6510 
6511 	return count;
6512 }
6513 
6514 u64
6515 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6516 {
6517 	u64 count = 0;
6518 	struct nfs4_client *clp;
6519 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6520 					  nfsd_net_id);
6521 
6522 	if (!nfsd_netns_ready(nn))
6523 		return count;
6524 
6525 	spin_lock(&nn->client_lock);
6526 	clp = nfsd_find_client(addr, addr_size);
6527 	if (clp) {
6528 		if (mark_client_expired_locked(clp) == nfs_ok)
6529 			++count;
6530 		else
6531 			clp = NULL;
6532 	}
6533 	spin_unlock(&nn->client_lock);
6534 
6535 	if (clp)
6536 		expire_client(clp);
6537 
6538 	return count;
6539 }
6540 
6541 u64
6542 nfsd_inject_forget_clients(u64 max)
6543 {
6544 	u64 count = 0;
6545 	struct nfs4_client *clp, *next;
6546 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6547 						nfsd_net_id);
6548 	LIST_HEAD(reaplist);
6549 
6550 	if (!nfsd_netns_ready(nn))
6551 		return count;
6552 
6553 	spin_lock(&nn->client_lock);
6554 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6555 		if (mark_client_expired_locked(clp) == nfs_ok) {
6556 			list_add(&clp->cl_lru, &reaplist);
6557 			if (max != 0 && ++count >= max)
6558 				break;
6559 		}
6560 	}
6561 	spin_unlock(&nn->client_lock);
6562 
6563 	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6564 		expire_client(clp);
6565 
6566 	return count;
6567 }
6568 
6569 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6570 			     const char *type)
6571 {
6572 	char buf[INET6_ADDRSTRLEN];
6573 	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6574 	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6575 }
6576 
6577 static void
6578 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6579 			     struct list_head *collect)
6580 {
6581 	struct nfs4_client *clp = lst->st_stid.sc_client;
6582 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6583 					  nfsd_net_id);
6584 
6585 	if (!collect)
6586 		return;
6587 
6588 	lockdep_assert_held(&nn->client_lock);
6589 	atomic_inc(&clp->cl_refcount);
6590 	list_add(&lst->st_locks, collect);
6591 }
6592 
6593 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6594 				    struct list_head *collect,
6595 				    bool (*func)(struct nfs4_ol_stateid *))
6596 {
6597 	struct nfs4_openowner *oop;
6598 	struct nfs4_ol_stateid *stp, *st_next;
6599 	struct nfs4_ol_stateid *lst, *lst_next;
6600 	u64 count = 0;
6601 
6602 	spin_lock(&clp->cl_lock);
6603 	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6604 		list_for_each_entry_safe(stp, st_next,
6605 				&oop->oo_owner.so_stateids, st_perstateowner) {
6606 			list_for_each_entry_safe(lst, lst_next,
6607 					&stp->st_locks, st_locks) {
6608 				if (func) {
6609 					if (func(lst))
6610 						nfsd_inject_add_lock_to_list(lst,
6611 									collect);
6612 				}
6613 				++count;
6614 				/*
6615 				 * Despite the fact that these functions deal
6616 				 * with 64-bit integers for "count", we must
6617 				 * ensure that it doesn't blow up the
6618 				 * clp->cl_refcount. Throw a warning if we
6619 				 * start to approach INT_MAX here.
6620 				 */
6621 				WARN_ON_ONCE(count == (INT_MAX / 2));
6622 				if (count == max)
6623 					goto out;
6624 			}
6625 		}
6626 	}
6627 out:
6628 	spin_unlock(&clp->cl_lock);
6629 
6630 	return count;
6631 }
6632 
6633 static u64
6634 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6635 			  u64 max)
6636 {
6637 	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6638 }
6639 
6640 static u64
6641 nfsd_print_client_locks(struct nfs4_client *clp)
6642 {
6643 	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6644 	nfsd_print_count(clp, count, "locked files");
6645 	return count;
6646 }
6647 
6648 u64
6649 nfsd_inject_print_locks(void)
6650 {
6651 	struct nfs4_client *clp;
6652 	u64 count = 0;
6653 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6654 						nfsd_net_id);
6655 
6656 	if (!nfsd_netns_ready(nn))
6657 		return 0;
6658 
6659 	spin_lock(&nn->client_lock);
6660 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6661 		count += nfsd_print_client_locks(clp);
6662 	spin_unlock(&nn->client_lock);
6663 
6664 	return count;
6665 }
6666 
6667 static void
6668 nfsd_reap_locks(struct list_head *reaplist)
6669 {
6670 	struct nfs4_client *clp;
6671 	struct nfs4_ol_stateid *stp, *next;
6672 
6673 	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6674 		list_del_init(&stp->st_locks);
6675 		clp = stp->st_stid.sc_client;
6676 		nfs4_put_stid(&stp->st_stid);
6677 		put_client(clp);
6678 	}
6679 }
6680 
6681 u64
6682 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6683 {
6684 	unsigned int count = 0;
6685 	struct nfs4_client *clp;
6686 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6687 						nfsd_net_id);
6688 	LIST_HEAD(reaplist);
6689 
6690 	if (!nfsd_netns_ready(nn))
6691 		return count;
6692 
6693 	spin_lock(&nn->client_lock);
6694 	clp = nfsd_find_client(addr, addr_size);
6695 	if (clp)
6696 		count = nfsd_collect_client_locks(clp, &reaplist, 0);
6697 	spin_unlock(&nn->client_lock);
6698 	nfsd_reap_locks(&reaplist);
6699 	return count;
6700 }
6701 
6702 u64
6703 nfsd_inject_forget_locks(u64 max)
6704 {
6705 	u64 count = 0;
6706 	struct nfs4_client *clp;
6707 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6708 						nfsd_net_id);
6709 	LIST_HEAD(reaplist);
6710 
6711 	if (!nfsd_netns_ready(nn))
6712 		return count;
6713 
6714 	spin_lock(&nn->client_lock);
6715 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6716 		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6717 		if (max != 0 && count >= max)
6718 			break;
6719 	}
6720 	spin_unlock(&nn->client_lock);
6721 	nfsd_reap_locks(&reaplist);
6722 	return count;
6723 }
6724 
6725 static u64
6726 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6727 			      struct list_head *collect,
6728 			      void (*func)(struct nfs4_openowner *))
6729 {
6730 	struct nfs4_openowner *oop, *next;
6731 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6732 						nfsd_net_id);
6733 	u64 count = 0;
6734 
6735 	lockdep_assert_held(&nn->client_lock);
6736 
6737 	spin_lock(&clp->cl_lock);
6738 	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6739 		if (func) {
6740 			func(oop);
6741 			if (collect) {
6742 				atomic_inc(&clp->cl_refcount);
6743 				list_add(&oop->oo_perclient, collect);
6744 			}
6745 		}
6746 		++count;
6747 		/*
6748 		 * Despite the fact that these functions deal with
6749 		 * 64-bit integers for "count", we must ensure that
6750 		 * it doesn't blow up the clp->cl_refcount. Throw a
6751 		 * warning if we start to approach INT_MAX here.
6752 		 */
6753 		WARN_ON_ONCE(count == (INT_MAX / 2));
6754 		if (count == max)
6755 			break;
6756 	}
6757 	spin_unlock(&clp->cl_lock);
6758 
6759 	return count;
6760 }
6761 
6762 static u64
6763 nfsd_print_client_openowners(struct nfs4_client *clp)
6764 {
6765 	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6766 
6767 	nfsd_print_count(clp, count, "openowners");
6768 	return count;
6769 }
6770 
6771 static u64
6772 nfsd_collect_client_openowners(struct nfs4_client *clp,
6773 			       struct list_head *collect, u64 max)
6774 {
6775 	return nfsd_foreach_client_openowner(clp, max, collect,
6776 						unhash_openowner_locked);
6777 }
6778 
6779 u64
6780 nfsd_inject_print_openowners(void)
6781 {
6782 	struct nfs4_client *clp;
6783 	u64 count = 0;
6784 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6785 						nfsd_net_id);
6786 
6787 	if (!nfsd_netns_ready(nn))
6788 		return 0;
6789 
6790 	spin_lock(&nn->client_lock);
6791 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6792 		count += nfsd_print_client_openowners(clp);
6793 	spin_unlock(&nn->client_lock);
6794 
6795 	return count;
6796 }
6797 
6798 static void
6799 nfsd_reap_openowners(struct list_head *reaplist)
6800 {
6801 	struct nfs4_client *clp;
6802 	struct nfs4_openowner *oop, *next;
6803 
6804 	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6805 		list_del_init(&oop->oo_perclient);
6806 		clp = oop->oo_owner.so_client;
6807 		release_openowner(oop);
6808 		put_client(clp);
6809 	}
6810 }
6811 
6812 u64
6813 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6814 				     size_t addr_size)
6815 {
6816 	unsigned int count = 0;
6817 	struct nfs4_client *clp;
6818 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6819 						nfsd_net_id);
6820 	LIST_HEAD(reaplist);
6821 
6822 	if (!nfsd_netns_ready(nn))
6823 		return count;
6824 
6825 	spin_lock(&nn->client_lock);
6826 	clp = nfsd_find_client(addr, addr_size);
6827 	if (clp)
6828 		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6829 	spin_unlock(&nn->client_lock);
6830 	nfsd_reap_openowners(&reaplist);
6831 	return count;
6832 }
6833 
6834 u64
6835 nfsd_inject_forget_openowners(u64 max)
6836 {
6837 	u64 count = 0;
6838 	struct nfs4_client *clp;
6839 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6840 						nfsd_net_id);
6841 	LIST_HEAD(reaplist);
6842 
6843 	if (!nfsd_netns_ready(nn))
6844 		return count;
6845 
6846 	spin_lock(&nn->client_lock);
6847 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6848 		count += nfsd_collect_client_openowners(clp, &reaplist,
6849 							max - count);
6850 		if (max != 0 && count >= max)
6851 			break;
6852 	}
6853 	spin_unlock(&nn->client_lock);
6854 	nfsd_reap_openowners(&reaplist);
6855 	return count;
6856 }
6857 
6858 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6859 				     struct list_head *victims)
6860 {
6861 	struct nfs4_delegation *dp, *next;
6862 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6863 						nfsd_net_id);
6864 	u64 count = 0;
6865 
6866 	lockdep_assert_held(&nn->client_lock);
6867 
6868 	spin_lock(&state_lock);
6869 	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6870 		if (victims) {
6871 			/*
6872 			 * It's not safe to mess with delegations that have a
6873 			 * non-zero dl_time. They might have already been broken
6874 			 * and could be processed by the laundromat outside of
6875 			 * the state_lock. Just leave them be.
6876 			 */
6877 			if (dp->dl_time != 0)
6878 				continue;
6879 
6880 			atomic_inc(&clp->cl_refcount);
6881 			WARN_ON(!unhash_delegation_locked(dp));
6882 			list_add(&dp->dl_recall_lru, victims);
6883 		}
6884 		++count;
6885 		/*
6886 		 * Despite the fact that these functions deal with
6887 		 * 64-bit integers for "count", we must ensure that
6888 		 * it doesn't blow up the clp->cl_refcount. Throw a
6889 		 * warning if we start to approach INT_MAX here.
6890 		 */
6891 		WARN_ON_ONCE(count == (INT_MAX / 2));
6892 		if (count == max)
6893 			break;
6894 	}
6895 	spin_unlock(&state_lock);
6896 	return count;
6897 }
6898 
6899 static u64
6900 nfsd_print_client_delegations(struct nfs4_client *clp)
6901 {
6902 	u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6903 
6904 	nfsd_print_count(clp, count, "delegations");
6905 	return count;
6906 }
6907 
6908 u64
6909 nfsd_inject_print_delegations(void)
6910 {
6911 	struct nfs4_client *clp;
6912 	u64 count = 0;
6913 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6914 						nfsd_net_id);
6915 
6916 	if (!nfsd_netns_ready(nn))
6917 		return 0;
6918 
6919 	spin_lock(&nn->client_lock);
6920 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6921 		count += nfsd_print_client_delegations(clp);
6922 	spin_unlock(&nn->client_lock);
6923 
6924 	return count;
6925 }
6926 
6927 static void
6928 nfsd_forget_delegations(struct list_head *reaplist)
6929 {
6930 	struct nfs4_client *clp;
6931 	struct nfs4_delegation *dp, *next;
6932 
6933 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6934 		list_del_init(&dp->dl_recall_lru);
6935 		clp = dp->dl_stid.sc_client;
6936 		revoke_delegation(dp);
6937 		put_client(clp);
6938 	}
6939 }
6940 
6941 u64
6942 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6943 				      size_t addr_size)
6944 {
6945 	u64 count = 0;
6946 	struct nfs4_client *clp;
6947 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6948 						nfsd_net_id);
6949 	LIST_HEAD(reaplist);
6950 
6951 	if (!nfsd_netns_ready(nn))
6952 		return count;
6953 
6954 	spin_lock(&nn->client_lock);
6955 	clp = nfsd_find_client(addr, addr_size);
6956 	if (clp)
6957 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6958 	spin_unlock(&nn->client_lock);
6959 
6960 	nfsd_forget_delegations(&reaplist);
6961 	return count;
6962 }
6963 
6964 u64
6965 nfsd_inject_forget_delegations(u64 max)
6966 {
6967 	u64 count = 0;
6968 	struct nfs4_client *clp;
6969 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6970 						nfsd_net_id);
6971 	LIST_HEAD(reaplist);
6972 
6973 	if (!nfsd_netns_ready(nn))
6974 		return count;
6975 
6976 	spin_lock(&nn->client_lock);
6977 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6978 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6979 		if (max != 0 && count >= max)
6980 			break;
6981 	}
6982 	spin_unlock(&nn->client_lock);
6983 	nfsd_forget_delegations(&reaplist);
6984 	return count;
6985 }
6986 
6987 static void
6988 nfsd_recall_delegations(struct list_head *reaplist)
6989 {
6990 	struct nfs4_client *clp;
6991 	struct nfs4_delegation *dp, *next;
6992 
6993 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6994 		list_del_init(&dp->dl_recall_lru);
6995 		clp = dp->dl_stid.sc_client;
6996 		/*
6997 		 * We skipped all entries that had a zero dl_time before,
6998 		 * so we can now reset the dl_time back to 0. If a delegation
6999 		 * break comes in now, then it won't make any difference since
7000 		 * we're recalling it either way.
7001 		 */
7002 		spin_lock(&state_lock);
7003 		dp->dl_time = 0;
7004 		spin_unlock(&state_lock);
7005 		nfsd_break_one_deleg(dp);
7006 		put_client(clp);
7007 	}
7008 }
7009 
7010 u64
7011 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7012 				      size_t addr_size)
7013 {
7014 	u64 count = 0;
7015 	struct nfs4_client *clp;
7016 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7017 						nfsd_net_id);
7018 	LIST_HEAD(reaplist);
7019 
7020 	if (!nfsd_netns_ready(nn))
7021 		return count;
7022 
7023 	spin_lock(&nn->client_lock);
7024 	clp = nfsd_find_client(addr, addr_size);
7025 	if (clp)
7026 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
7027 	spin_unlock(&nn->client_lock);
7028 
7029 	nfsd_recall_delegations(&reaplist);
7030 	return count;
7031 }
7032 
7033 u64
7034 nfsd_inject_recall_delegations(u64 max)
7035 {
7036 	u64 count = 0;
7037 	struct nfs4_client *clp, *next;
7038 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7039 						nfsd_net_id);
7040 	LIST_HEAD(reaplist);
7041 
7042 	if (!nfsd_netns_ready(nn))
7043 		return count;
7044 
7045 	spin_lock(&nn->client_lock);
7046 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7047 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7048 		if (max != 0 && count >= max)
7049 			break;
7050 	}
7051 	spin_unlock(&nn->client_lock);
7052 	nfsd_recall_delegations(&reaplist);
7053 	return count;
7054 }
7055 #endif /* CONFIG_NFSD_FAULT_INJECTION */
7056 
7057 /*
7058  * Since the lifetime of a delegation isn't limited to that of an open, a
7059  * client may quite reasonably hang on to a delegation as long as it has
7060  * the inode cached.  This becomes an obvious problem the first time a
7061  * client's inode cache approaches the size of the server's total memory.
7062  *
7063  * For now we avoid this problem by imposing a hard limit on the number
7064  * of delegations, which varies according to the server's memory size.
7065  */
7066 static void
7067 set_max_delegations(void)
7068 {
7069 	/*
7070 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
7071 	 * estimates suggest that in the worst case (where every delegation
7072 	 * is for a different inode), a delegation could take about 1.5K,
7073 	 * giving a worst case usage of about 6% of memory.
7074 	 */
7075 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7076 }
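
/*
 * Illustrative arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12): the
 * shift above is 20 - 2 - 12 = 6, i.e. one delegation per 64 free buffer
 * pages (256 KiB), which is the "4 per megabyte" described above.  A
 * server with roughly 1 GiB reported by nr_free_buffer_pages() would
 * therefore allow about 4096 delegations.
 */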
7077 
7078 static int nfs4_state_create_net(struct net *net)
7079 {
7080 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7081 	int i;
7082 
7083 	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7084 			CLIENT_HASH_SIZE, GFP_KERNEL);
7085 	if (!nn->conf_id_hashtbl)
7086 		goto err;
7087 	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7088 			CLIENT_HASH_SIZE, GFP_KERNEL);
7089 	if (!nn->unconf_id_hashtbl)
7090 		goto err_unconf_id;
7091 	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
7092 			SESSION_HASH_SIZE, GFP_KERNEL);
7093 	if (!nn->sessionid_hashtbl)
7094 		goto err_sessionid;
7095 
7096 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7097 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7098 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7099 	}
7100 	for (i = 0; i < SESSION_HASH_SIZE; i++)
7101 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7102 	nn->conf_name_tree = RB_ROOT;
7103 	nn->unconf_name_tree = RB_ROOT;
7104 	nn->boot_time = get_seconds();
7105 	nn->grace_ended = false;
7106 	nn->nfsd4_manager.block_opens = true;
7107 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7108 	INIT_LIST_HEAD(&nn->client_lru);
7109 	INIT_LIST_HEAD(&nn->close_lru);
7110 	INIT_LIST_HEAD(&nn->del_recall_lru);
7111 	spin_lock_init(&nn->client_lock);
7112 
7113 	spin_lock_init(&nn->blocked_locks_lock);
7114 	INIT_LIST_HEAD(&nn->blocked_locks_lru);
7115 
7116 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7117 	get_net(net);
7118 
7119 	return 0;
7120 
7121 err_sessionid:
7122 	kfree(nn->unconf_id_hashtbl);
7123 err_unconf_id:
7124 	kfree(nn->conf_id_hashtbl);
7125 err:
7126 	return -ENOMEM;
7127 }
7128 
7129 static void
7130 nfs4_state_destroy_net(struct net *net)
7131 {
7132 	int i;
7133 	struct nfs4_client *clp = NULL;
7134 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7135 
7136 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7137 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
7138 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7139 			destroy_client(clp);
7140 		}
7141 	}
7142 
7143 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7144 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7145 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7146 			destroy_client(clp);
7147 		}
7148 	}
7149 
7150 	kfree(nn->sessionid_hashtbl);
7151 	kfree(nn->unconf_id_hashtbl);
7152 	kfree(nn->conf_id_hashtbl);
7153 	put_net(net);
7154 }
7155 
7156 int
7157 nfs4_state_start_net(struct net *net)
7158 {
7159 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7160 	int ret;
7161 
7162 	ret = nfs4_state_create_net(net);
7163 	if (ret)
7164 		return ret;
7165 	locks_start_grace(net, &nn->nfsd4_manager);
7166 	nfsd4_client_tracking_init(net);
7167 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
7168 	       nn->nfsd4_grace, net->ns.inum);
7169 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7170 	return 0;
7171 }
7172 
7173 /* initialization to perform when the nfsd service is started: */
7174 
7175 int
7176 nfs4_state_start(void)
7177 {
7178 	int ret;
7179 
7180 	ret = set_callback_cred();
7181 	if (ret)
7182 		return ret;
7183 
7184 	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7185 	if (laundry_wq == NULL) {
7186 		ret = -ENOMEM;
7187 		goto out_cleanup_cred;
7188 	}
7189 	ret = nfsd4_create_callback_queue();
7190 	if (ret)
7191 		goto out_free_laundry;
7192 
7193 	set_max_delegations();
7194 	return 0;
7195 
7196 out_free_laundry:
7197 	destroy_workqueue(laundry_wq);
7198 out_cleanup_cred:
7199 	cleanup_callback_cred();
7200 	return ret;
7201 }
7202 
7203 void
7204 nfs4_state_shutdown_net(struct net *net)
7205 {
7206 	struct nfs4_delegation *dp = NULL;
7207 	struct list_head *pos, *next, reaplist;
7208 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7209 	struct nfsd4_blocked_lock *nbl;
7210 
7211 	cancel_delayed_work_sync(&nn->laundromat_work);
7212 	locks_end_grace(&nn->nfsd4_manager);
7213 
7214 	INIT_LIST_HEAD(&reaplist);
7215 	spin_lock(&state_lock);
7216 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
7217 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7218 		WARN_ON(!unhash_delegation_locked(dp));
7219 		list_add(&dp->dl_recall_lru, &reaplist);
7220 	}
7221 	spin_unlock(&state_lock);
7222 	list_for_each_safe(pos, next, &reaplist) {
7223 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7224 		list_del_init(&dp->dl_recall_lru);
7225 		put_clnt_odstate(dp->dl_clnt_odstate);
7226 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
7227 		nfs4_put_stid(&dp->dl_stid);
7228 	}
7229 
7230 	BUG_ON(!list_empty(&reaplist));
7231 	spin_lock(&nn->blocked_locks_lock);
7232 	while (!list_empty(&nn->blocked_locks_lru)) {
7233 		nbl = list_first_entry(&nn->blocked_locks_lru,
7234 					struct nfsd4_blocked_lock, nbl_lru);
7235 		list_move(&nbl->nbl_lru, &reaplist);
7236 		list_del_init(&nbl->nbl_list);
7237 	}
7238 	spin_unlock(&nn->blocked_locks_lock);
7239 
7240 	while (!list_empty(&reaplist)) {
7241 		nbl = list_first_entry(&reaplist,
7242 					struct nfsd4_blocked_lock, nbl_lru);
7243 		list_del_init(&nbl->nbl_lru);
7244 		posix_unblock_lock(&nbl->nbl_lock);
7245 		free_blocked_lock(nbl);
7246 	}
7247 
7248 	nfsd4_client_tracking_exit(net);
7249 	nfs4_state_destroy_net(net);
7250 }
7251 
7252 void
7253 nfs4_state_shutdown(void)
7254 {
7255 	destroy_workqueue(laundry_wq);
7256 	nfsd4_destroy_callback_queue();
7257 	cleanup_callback_cred();
7258 }
7259 
7260 static void
7261 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7262 {
7263 	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7264 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7265 }
7266 
7267 static void
7268 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7269 {
7270 	if (cstate->minorversion) {
7271 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7272 		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7273 	}
7274 }
7275 
7276 void
7277 clear_current_stateid(struct nfsd4_compound_state *cstate)
7278 {
7279 	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7280 }
7281 
7282 /*
7283  * functions to set current state id
7284  */
7285 void
7286 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7287 		union nfsd4_op_u *u)
7288 {
7289 	put_stateid(cstate, &u->open_downgrade.od_stateid);
7290 }
7291 
7292 void
7293 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7294 		union nfsd4_op_u *u)
7295 {
7296 	put_stateid(cstate, &u->open.op_stateid);
7297 }
7298 
7299 void
7300 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7301 		union nfsd4_op_u *u)
7302 {
7303 	put_stateid(cstate, &u->close.cl_stateid);
7304 }
7305 
7306 void
7307 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7308 		union nfsd4_op_u *u)
7309 {
7310 	put_stateid(cstate, &u->lock.lk_resp_stateid);
7311 }
7312 
7313 /*
7314  * functions to consume current state id
7315  */
7316 
7317 void
7318 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7319 		union nfsd4_op_u *u)
7320 {
7321 	get_stateid(cstate, &u->open_downgrade.od_stateid);
7322 }
7323 
7324 void
7325 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7326 		union nfsd4_op_u *u)
7327 {
7328 	get_stateid(cstate, &u->delegreturn.dr_stateid);
7329 }
7330 
7331 void
7332 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7333 		union nfsd4_op_u *u)
7334 {
7335 	get_stateid(cstate, &u->free_stateid.fr_stateid);
7336 }
7337 
7338 void
7339 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7340 		union nfsd4_op_u *u)
7341 {
7342 	get_stateid(cstate, &u->setattr.sa_stateid);
7343 }
7344 
7345 void
7346 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7347 		union nfsd4_op_u *u)
7348 {
7349 	get_stateid(cstate, &u->close.cl_stateid);
7350 }
7351 
7352 void
7353 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7354 		union nfsd4_op_u *u)
7355 {
7356 	get_stateid(cstate, &u->locku.lu_stateid);
7357 }
7358 
7359 void
7360 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7361 		union nfsd4_op_u *u)
7362 {
7363 	get_stateid(cstate, &u->read.rd_stateid);
7364 }
7365 
7366 void
7367 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7368 		union nfsd4_op_u *u)
7369 {
7370 	get_stateid(cstate, &u->write.wr_stateid);
7371 }
7372