/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;

static void free_session(struct nfsd4_session *);

static struct nfsd4_callback_ops nfsd4_cb_recall_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

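/*
 * Simple multiply-by-37 polynomial hash over an opaque byte string
 * (the classic string hash); callers mask the result down to their
 * table size, e.g. with OWNER_HASH_MASK or CLIENT_HASH_MASK.
 */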
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
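
/*
 * Note the RCU pattern above: the final put unhashes the file under
 * state_lock with hlist_del_rcu() and then defers the actual free via
 * call_rcu(), so lookups that walk file_hashtbl under rcu_read_lock()
 * (not shown in this excerpt) can never touch freed memory.
 */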

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}
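
/*
 * Illustration of the two checks above: share access and deny modes
 * use the same bit values (READ = 1, WRITE = 2), so an OPEN requesting
 * ACCESS_WRITE against a file whose fi_share_deny includes
 * NFS4_SHARE_DENY_WRITE fails the "access & fi_share_deny" test in
 * nfs4_file_get_access(); conversely, an OPEN requesting DENY_READ
 * fails in nfs4_file_check_deny() while any reader holds
 * fi_access[O_RDONLY].
 */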

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
					 struct kmem_cache *slab)
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	atomic_set(&stid->sc_count, 1);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
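
/*
 * Callers of nfs4_alloc_stid() are expected to install the
 * type-specific destructor in sc_free before the stateid becomes
 * visible, as nfs4_alloc_open_stateid() and alloc_init_deleg() below
 * do.
 */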

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;
	struct nfs4_ol_stateid *stp;

	stid = nfs4_alloc_stid(clp, stateid_slab);
	if (!stid)
		return NULL;

	stp = openlockstateid(stid);
	stp->st_stid.sc_free = nfs4_free_ol_stateid;
	return stp;
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to a 32-bit value
 * and use the low three bytes as bit indices into the filter.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
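
/*
 * Worked example (illustrative numbers): a filehandle hashing to
 * 0x00a1b2c3 sets bits 0xc3, 0xb2 and 0xa1 in the "new" filter;
 * delegation_blocked() then reports that filehandle as blocked while
 * either filter has all three bits set.  As with any bloom filter,
 * false positives are possible, but they only delay a delegation
 * grant, so they are harmless.
 */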

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	if (dp == NULL)
		goto out_dec;

	dp->dl_stid.sc_free = nfs4_free_deleg;
	/*
	 * delegation seqids are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	atomic_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}

static void
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	spin_lock(&state_lock);
	unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
	nfs4_put_stid(&dp->dl_stid);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}
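
/*
 * A revoked delegation stays on cl_revoked for 4.1+ clients so that
 * TEST_STATEID can report it and FREE_STATEID can reap it; NFSv4.0 has
 * no equivalent protocol machinery, so there the stateid is simply
 * dropped.
 */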

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
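
/*
 * Worked example of the bmap encoding: after an OPEN with
 * share_access = READ (1) and a second OPEN with share_access =
 * BOTH (3), st_access_bmap has bits 1 and 3 set, and
 * bmap_to_share_mode() below returns 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */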
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!atomic_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);

	list_del_init(&stp->st_locks);
	unhash_ol_stateid(stp);
	nfs4_unhash_stid(&stp->st_stid);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

	spin_lock(&oo->oo_owner.so_client->cl_lock);
	unhash_lock_stateid(stp);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *stp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		unhash_lock_stateid(stp);
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	nfs4_put_stateowner(&lo->lo_owner);
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		unhash_lock_stateid(stp);
		put_ol_stateid_locked(stp, reaplist);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	unhash_open_stateid(stp, &reaplist);
	put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		unhash_open_stateid(stp, &reaplist);
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

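/*
 * Session IDs are laid out by gen_sessionid() below as (clientid,
 * sequence, reserved); hashing on the monotonically increasing
 * sequence number spreads sessions round-robin across
 * sessionid_hashtbl.
 */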
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
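
/*
 * Example of the arithmetic: 24 + 12 + 44 == 80 bytes of fixed
 * overhead, so a client that negotiates ca_maxresponsesize_cached =
 * 1104 gets slots that each cache 1024 bytes of reply data (see
 * slot_bytes() below).
 */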

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

/*
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

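/*
 * compare_blob() defines the total order (shorter blobs first, then
 * memcmp()) used by the conf_name_tree and unconf_name_tree rb-trees
 * further down.
 */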
1703 static int
1704 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1705 {
1706 	if (o1->len < o2->len)
1707 		return -1;
1708 	if (o1->len > o2->len)
1709 		return 1;
1710 	return memcmp(o1->data, o2->data, o1->len);
1711 }
1712 
1713 static int same_name(const char *n1, const char *n2)
1714 {
1715 	return 0 == memcmp(n1, n2, HEXDIR_LEN);
1716 }
1717 
1718 static int
1719 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1720 {
1721 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1722 }
1723 
1724 static int
1725 same_clid(clientid_t *cl1, clientid_t *cl2)
1726 {
1727 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1728 }
1729 
1730 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1731 {
1732 	int i;
1733 
1734 	if (g1->ngroups != g2->ngroups)
1735 		return false;
1736 	for (i=0; i<g1->ngroups; i++)
1737 		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1738 			return false;
1739 	return true;
1740 }
1741 
1742 /*
1743  * RFC 3530 language requires clid_inuse be returned when the
1744  * "principal" associated with a requests differs from that previously
1745  * used.  We use uid, gid's, and gss principal string as our best
1746  * approximation.  We also don't want to allow non-gss use of a client
1747  * established using gss: in theory cr_principal should catch that
1748  * change, but in practice cr_principal can be null even in the gss case
1749  * since gssd doesn't always pass down a principal string.
1750  */
1751 static bool is_gss_cred(struct svc_cred *cr)
1752 {
1753 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
1754 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1755 }
1756 
1757 
1758 static bool
1759 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1760 {
1761 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1762 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1763 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1764 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1765 		return false;
1766 	if (cr1->cr_principal == cr2->cr_principal)
1767 		return true;
1768 	if (!cr1->cr_principal || !cr2->cr_principal)
1769 		return false;
1770 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1771 }
1772 
1773 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1774 {
1775 	struct svc_cred *cr = &rqstp->rq_cred;
1776 	u32 service;
1777 
1778 	if (!cr->cr_gss_mech)
1779 		return false;
1780 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1781 	return service == RPC_GSS_SVC_INTEGRITY ||
1782 	       service == RPC_GSS_SVC_PRIVACY;
1783 }
1784 
1785 static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1786 {
1787 	struct svc_cred *cr = &rqstp->rq_cred;
1788 
1789 	if (!cl->cl_mach_cred)
1790 		return true;
1791 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
1792 		return false;
1793 	if (!svc_rqst_integrity_protected(rqstp))
1794 		return false;
1795 	if (!cr->cr_principal)
1796 		return false;
1797 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1798 }
1799 
1800 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
1801 {
1802 	__be32 verf[2];
1803 
1804 	/*
1805 	 * This is opaque to client, so no need to byte-swap. Use
1806 	 * __force to keep sparse happy
1807 	 */
1808 	verf[0] = (__force __be32)get_seconds();
1809 	verf[1] = (__force __be32)nn->clientid_counter;
1810 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1811 }
1812 
1813 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1814 {
1815 	clp->cl_clientid.cl_boot = nn->boot_time;
1816 	clp->cl_clientid.cl_id = nn->clientid_counter++;
1817 	gen_confirm(clp, nn);
1818 }
1819 
1820 static struct nfs4_stid *
1821 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1822 {
1823 	struct nfs4_stid *ret;
1824 
1825 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1826 	if (!ret || !ret->sc_type)
1827 		return NULL;
1828 	return ret;
1829 }
1830 
1831 static struct nfs4_stid *
1832 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1833 {
1834 	struct nfs4_stid *s;
1835 
1836 	spin_lock(&cl->cl_lock);
1837 	s = find_stateid_locked(cl, t);
1838 	if (s != NULL) {
1839 		if (typemask & s->sc_type)
1840 			atomic_inc(&s->sc_count);
1841 		else
1842 			s = NULL;
1843 	}
1844 	spin_unlock(&cl->cl_lock);
1845 	return s;
1846 }
1847 
1848 static struct nfs4_client *create_client(struct xdr_netobj name,
1849 		struct svc_rqst *rqstp, nfs4_verifier *verf)
1850 {
1851 	struct nfs4_client *clp;
1852 	struct sockaddr *sa = svc_addr(rqstp);
1853 	int ret;
1854 	struct net *net = SVC_NET(rqstp);
1855 
1856 	clp = alloc_client(name);
1857 	if (clp == NULL)
1858 		return NULL;
1859 
1860 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1861 	if (ret) {
1862 		free_client(clp);
1863 		return NULL;
1864 	}
1865 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
1866 	clp->cl_time = get_seconds();
1867 	clear_bit(0, &clp->cl_cb_slot_busy);
1868 	copy_verf(clp, verf);
1869 	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1870 	clp->cl_cb_session = NULL;
1871 	clp->net = net;
1872 	return clp;
1873 }
1874 
1875 static void
1876 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
1877 {
1878 	struct rb_node **new = &(root->rb_node), *parent = NULL;
1879 	struct nfs4_client *clp;
1880 
1881 	while (*new) {
1882 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
1883 		parent = *new;
1884 
1885 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
1886 			new = &((*new)->rb_left);
1887 		else
1888 			new = &((*new)->rb_right);
1889 	}
1890 
1891 	rb_link_node(&new_clp->cl_namenode, parent, new);
1892 	rb_insert_color(&new_clp->cl_namenode, root);
1893 }
1894 
1895 static struct nfs4_client *
1896 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1897 {
1898 	int cmp;
1899 	struct rb_node *node = root->rb_node;
1900 	struct nfs4_client *clp;
1901 
1902 	while (node) {
1903 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
1904 		cmp = compare_blob(&clp->cl_name, name);
1905 		if (cmp > 0)
1906 			node = node->rb_left;
1907 		else if (cmp < 0)
1908 			node = node->rb_right;
1909 		else
1910 			return clp;
1911 	}
1912 	return NULL;
1913 }
1914 
1915 static void
1916 add_to_unconfirmed(struct nfs4_client *clp)
1917 {
1918 	unsigned int idhashval;
1919 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1920 
1921 	lockdep_assert_held(&nn->client_lock);
1922 
1923 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1924 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1925 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1926 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1927 	renew_client_locked(clp);
1928 }
1929 
1930 static void
1931 move_to_confirmed(struct nfs4_client *clp)
1932 {
1933 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1934 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1935 
1936 	lockdep_assert_held(&nn->client_lock);
1937 
1938 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1939 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1940 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1941 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
1942 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1943 	renew_client_locked(clp);
1944 }
1945 
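/*
 * Look up a client by shorthand clientid.  The "sessions" flag keeps
 * minor version 0 and 4.1+ clients apart: a clientid established via
 * EXCHANGE_ID must not be visible to SETCLIENTID-era lookups, and vice
 * versa.
 */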
1946 static struct nfs4_client *
1947 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1948 {
1949 	struct nfs4_client *clp;
1950 	unsigned int idhashval = clientid_hashval(clid->cl_id);
1951 
1952 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
1953 		if (same_clid(&clp->cl_clientid, clid)) {
1954 			if ((bool)clp->cl_minorversion != sessions)
1955 				return NULL;
1956 			renew_client_locked(clp);
1957 			return clp;
1958 		}
1959 	}
1960 	return NULL;
1961 }
1962 
1963 static struct nfs4_client *
1964 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1965 {
1966 	struct list_head *tbl = nn->conf_id_hashtbl;
1967 
1968 	lockdep_assert_held(&nn->client_lock);
1969 	return find_client_in_id_table(tbl, clid, sessions);
1970 }
1971 
1972 static struct nfs4_client *
1973 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1974 {
1975 	struct list_head *tbl = nn->unconf_id_hashtbl;
1976 
1977 	lockdep_assert_held(&nn->client_lock);
1978 	return find_client_in_id_table(tbl, clid, sessions);
1979 }
1980 
1981 static bool clp_used_exchangeid(struct nfs4_client *clp)
1982 {
1983 	return clp->cl_exchange_flags != 0;
1984 }
1985 
1986 static struct nfs4_client *
1987 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1988 {
1989 	lockdep_assert_held(&nn->client_lock);
1990 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
1991 }
1992 
1993 static struct nfs4_client *
1994 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1995 {
1996 	lockdep_assert_held(&nn->client_lock);
1997 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1998 }
1999 
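/*
 * Parse the callback channel offered in SETCLIENTID: the netid must be
 * "tcp" or "tcp6", and the universal address must parse to a sockaddr
 * of the matching family.  On any failure we record no callback address
 * (so the client simply gets no delegations) rather than failing the
 * operation.
 */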
2000 static void
2001 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2002 {
2003 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2004 	struct sockaddr	*sa = svc_addr(rqstp);
2005 	u32 scopeid = rpc_get_scope_id(sa);
2006 	unsigned short expected_family;
2007 
2008 	/* Currently, we only support tcp and tcp6 for the callback channel */
2009 	if (se->se_callback_netid_len == 3 &&
2010 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2011 		expected_family = AF_INET;
2012 	else if (se->se_callback_netid_len == 4 &&
2013 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2014 		expected_family = AF_INET6;
2015 	else
2016 		goto out_err;
2017 
2018 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2019 					    se->se_callback_addr_len,
2020 					    (struct sockaddr *)&conn->cb_addr,
2021 					    sizeof(conn->cb_addr));
2022 
2023 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2024 		goto out_err;
2025 
2026 	if (conn->cb_addr.ss_family == AF_INET6)
2027 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2028 
2029 	conn->cb_prog = se->se_callback_prog;
2030 	conn->cb_ident = se->se_callback_ident;
2031 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2032 	return;
2033 out_err:
2034 	conn->cb_addr.ss_family = AF_UNSPEC;
2035 	conn->cb_addrlen = 0;
2036 	dprintk("NFSD: this client (clientid %08x/%08x) "
2037 		"will not receive delegations\n",
2038 		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2039 
2040 	return;
2041 }
2042 
2043 /*
2044  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2045  */
2046 static void
2047 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2048 {
2049 	struct xdr_buf *buf = resp->xdr.buf;
2050 	struct nfsd4_slot *slot = resp->cstate.slot;
2051 	unsigned int base;
2052 
2053 	dprintk("--> %s slot %p\n", __func__, slot);
2054 
2055 	slot->sl_opcnt = resp->opcnt;
2056 	slot->sl_status = resp->cstate.status;
2057 
2058 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2059 	if (nfsd4_not_cached(resp)) {
2060 		slot->sl_datalen = 0;
2061 		return;
2062 	}
2063 	base = resp->cstate.data_offset;
2064 	slot->sl_datalen = buf->len - base;
2065 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2066 		WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
2067 	return;
2068 }
2069 
2070 /*
2071  * Encode the replay sequence operation from the slot values.
2072  * If cachethis is FALSE, encode the uncached-reply error on the next
2073  * operation, which sets resp->p and increments resp->opcnt for
2074  * nfs4svc_encode_compoundres.
2075  */
2077 static __be32
2078 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2079 			  struct nfsd4_compoundres *resp)
2080 {
2081 	struct nfsd4_op *op;
2082 	struct nfsd4_slot *slot = resp->cstate.slot;
2083 
2084 	/* Encode the replayed sequence operation */
2085 	op = &args->ops[resp->opcnt - 1];
2086 	nfsd4_encode_operation(resp, op);
2087 
2088 	/* Return nfserr_retry_uncached_rep in next operation. */
2089 	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
2090 		op = &args->ops[resp->opcnt++];
2091 		op->status = nfserr_retry_uncached_rep;
2092 		nfsd4_encode_operation(resp, op);
2093 	}
2094 	return op->status;
2095 }
2096 
2097 /*
2098  * The sequence operation is not cached because we can use the slot and
2099  * session values.
2100  */
2101 static __be32
2102 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2103 			 struct nfsd4_sequence *seq)
2104 {
2105 	struct nfsd4_slot *slot = resp->cstate.slot;
2106 	struct xdr_stream *xdr = &resp->xdr;
2107 	__be32 *p;
2108 	__be32 status;
2109 
2110 	dprintk("--> %s slot %p\n", __func__, slot);
2111 
2112 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2113 	if (status)
2114 		return status;
2115 
2116 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2117 	if (!p) {
2118 		WARN_ON_ONCE(1);
2119 		return nfserr_serverfault;
2120 	}
2121 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2122 	xdr_commit_encode(xdr);
2123 
2124 	resp->opcnt = slot->sl_opcnt;
2125 	return slot->sl_status;
2126 }
2127 
2128 /*
2129  * Set the exchange_id flags returned by the server.
2130  */
2131 static void
2132 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2133 {
2134 #ifdef CONFIG_NFSD_PNFS
2135 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2136 #else
2137 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2138 #endif
2139 
2140 	/* Referrals are supported, Migration is not. */
2141 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2142 
2143 	/* set the wire flags to return to client. */
2144 	clid->flags = new->cl_exchange_flags;
2145 }
2146 
2147 static bool client_has_state(struct nfs4_client *clp)
2148 {
2149 	/*
2150 	 * Note the clp->cl_openowners check isn't quite right: there's no
2151 	 * need to count owners without stateids.
2152 	 *
2153 	 * Also note we should probably be using this in the 4.0 case too.
2154 	 */
2155 	return !list_empty(&clp->cl_openowners)
2156 		|| !list_empty(&clp->cl_delegations)
2157 		|| !list_empty(&clp->cl_sessions);
2158 }
2159 
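/*
 * EXCHANGE_ID: the 4.1+ replacement for SETCLIENTID.  The case analysis
 * below ("case 1" .. "case 9") follows the table in RFC 5661 section
 * 18.35.4, distinguishing new clients, client reboots (verifier
 * changes), credential changes, and updates to confirmed records.
 */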
2160 __be32
2161 nfsd4_exchange_id(struct svc_rqst *rqstp,
2162 		  struct nfsd4_compound_state *cstate,
2163 		  struct nfsd4_exchange_id *exid)
2164 {
2165 	struct nfs4_client *conf, *new;
2166 	struct nfs4_client *unconf = NULL;
2167 	__be32 status;
2168 	char			addr_str[INET6_ADDRSTRLEN];
2169 	nfs4_verifier		verf = exid->verifier;
2170 	struct sockaddr		*sa = svc_addr(rqstp);
2171 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2172 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2173 
2174 	rpc_ntop(sa, addr_str, sizeof(addr_str));
2175 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2176 		"ip_addr=%s flags %x, spa_how %d\n",
2177 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
2178 		addr_str, exid->flags, exid->spa_how);
2179 
2180 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2181 		return nfserr_inval;
2182 
2183 	switch (exid->spa_how) {
2184 	case SP4_MACH_CRED:
2185 		if (!svc_rqst_integrity_protected(rqstp))
2186 			return nfserr_inval;
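		/* fall through */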
2187 	case SP4_NONE:
2188 		break;
2189 	default:				/* checked by xdr code */
2190 		WARN_ON_ONCE(1);
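		/* fall through */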
2191 	case SP4_SSV:
2192 		return nfserr_encr_alg_unsupp;
2193 	}
2194 
2195 	new = create_client(exid->clname, rqstp, &verf);
2196 	if (new == NULL)
2197 		return nfserr_jukebox;
2198 
2199 	/* Cases below refer to rfc 5661 section 18.35.4: */
2200 	spin_lock(&nn->client_lock);
2201 	conf = find_confirmed_client_by_name(&exid->clname, nn);
2202 	if (conf) {
2203 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2204 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2205 
2206 		if (update) {
2207 			if (!clp_used_exchangeid(conf)) { /* buggy client */
2208 				status = nfserr_inval;
2209 				goto out;
2210 			}
2211 			if (!mach_creds_match(conf, rqstp)) {
2212 				status = nfserr_wrong_cred;
2213 				goto out;
2214 			}
2215 			if (!creds_match) { /* case 9 */
2216 				status = nfserr_perm;
2217 				goto out;
2218 			}
2219 			if (!verfs_match) { /* case 8 */
2220 				status = nfserr_not_same;
2221 				goto out;
2222 			}
2223 			/* case 6 */
2224 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2225 			goto out_copy;
2226 		}
2227 		if (!creds_match) { /* case 3 */
2228 			if (client_has_state(conf)) {
2229 				status = nfserr_clid_inuse;
2230 				goto out;
2231 			}
2232 			goto out_new;
2233 		}
2234 		if (verfs_match) { /* case 2 */
2235 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2236 			goto out_copy;
2237 		}
2238 		/* case 5, client reboot */
2239 		conf = NULL;
2240 		goto out_new;
2241 	}
2242 
2243 	if (update) { /* case 7 */
2244 		status = nfserr_noent;
2245 		goto out;
2246 	}
2247 
2248 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
2249 	if (unconf) /* case 4, possible retry or client restart */
2250 		unhash_client_locked(unconf);
2251 
2252 	/* case 1 (normal case) */
2253 out_new:
2254 	if (conf) {
2255 		status = mark_client_expired_locked(conf);
2256 		if (status)
2257 			goto out;
2258 	}
2259 	new->cl_minorversion = cstate->minorversion;
2260 	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);
2261 
2262 	gen_clid(new, nn);
2263 	add_to_unconfirmed(new);
2264 	swap(new, conf);
2265 out_copy:
2266 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2267 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
2268 
2269 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2270 	nfsd4_set_ex_flags(conf, exid);
2271 
2272 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2273 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2274 	status = nfs_ok;
2275 
2276 out:
2277 	spin_unlock(&nn->client_lock);
2278 	if (new)
2279 		expire_client(new);
2280 	if (unconf)
2281 		expire_client(unconf);
2282 	return status;
2283 }
2284 
2285 static __be32
2286 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2287 {
2288 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2289 		slot_seqid);
2290 
2291 	/* The slot is in use, and no response has been sent. */
2292 	if (slot_inuse) {
2293 		if (seqid == slot_seqid)
2294 			return nfserr_jukebox;
2295 		else
2296 			return nfserr_seq_misordered;
2297 	}
2298 	/* Note unsigned 32-bit arithmetic handles wraparound: */
2299 	if (likely(seqid == slot_seqid + 1))
2300 		return nfs_ok;
2301 	if (seqid == slot_seqid)
2302 		return nfserr_replay_cache;
2303 	return nfserr_seq_misordered;
2304 }
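
/*
 * Example of the sequencing rules above: with slot_seqid == 41 and the
 * slot idle, seqid 42 is the next new request (nfs_ok), seqid 41 is a
 * retransmission to be answered from the reply cache
 * (nfserr_replay_cache), and anything else is misordered.  Unsigned
 * arithmetic makes seqid 0 the legitimate successor of 0xffffffff.
 */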
2305 
2306 /*
2307  * Cache the create session result into the create session single DRC
2308  * slot cache by saving the xdr structure. sl_seqid has been set.
2309  * Do this for solo or embedded create session operations.
2310  */
2311 static void
2312 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2313 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
2314 {
2315 	slot->sl_status = nfserr;
2316 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2317 }
2318 
2319 static __be32
2320 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2321 			    struct nfsd4_clid_slot *slot)
2322 {
2323 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2324 	return slot->sl_status;
2325 }
2326 
2327 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
2328 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2329 			1 +	/* minimal tag: zero-length string, so just the length word */ \
2330 			3 +	/* version, opcount, opcode */ \
2331 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2332 				/* seqid, slotID, slotID, cache */ \
2333 			4 ) * sizeof(__be32))
2334 
2335 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2336 			2 +	/* verifier: AUTH_NULL, length 0 */\
2337 			1 +	/* status */ \
2338 			1 +	/* minimal tag: zero-length string, so just the length word */ \
2339 			3 +	/* opcount, opcode, opstatus*/ \
2340 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2341 				/* seqid, slotID, slotID, slotID, status */ \
2342 			5 ) * sizeof(__be32))
2343 
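/*
 * Sanity-check and clamp the fore channel attributes from
 * CREATE_SESSION.  The *_HDR_SEQ_SZ macros above give the smallest
 * request/reply that a lone SEQUENCE op can occupy, so anything smaller
 * is rejected with nfserr_toosmall; the remaining attributes are
 * clamped to server limits before DRC memory is reserved.
 */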
2344 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2345 {
2346 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2347 
2348 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2349 		return nfserr_toosmall;
2350 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2351 		return nfserr_toosmall;
2352 	ca->headerpadsz = 0;
2353 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2354 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2355 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2356 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2357 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2358 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2359 	/*
2360 	 * Note that decreasing the slot size below the client's request may
2361 	 * make it difficult for the client to function correctly, whereas
2362 	 * decreasing the number of slots will (just?) affect
2363 	 * performance.  When short on memory we therefore prefer to
2364 	 * decrease the number of slots rather than their size.  Clients
2365 	 * that request larger slots than they need will get poor results:
2366 	 */
2367 	ca->maxreqs = nfsd4_get_drc_mem(ca);
2368 	if (!ca->maxreqs)
2369 		return nfserr_jukebox;
2370 
2371 	return nfs_ok;
2372 }
2373 
2374 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
2375 				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
2376 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
2377 				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2378 
2379 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2380 {
2381 	ca->headerpadsz = 0;
2382 
2383 	/*
2384 	 * These RPC_MAX_HEADER macros are overkill, especially since we
2385 	 * don't even do gss on the backchannel yet.  But this is still
2386 	 * less than 1k.  Tighten up this estimate in the unlikely event
2387 	 * it turns out to be a problem for some client:
2388 	 */
2389 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2390 		return nfserr_toosmall;
2391 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2392 		return nfserr_toosmall;
2393 	ca->maxresp_cached = 0;
2394 	if (ca->maxops < 2)
2395 		return nfserr_toosmall;
2396 
2397 	return nfs_ok;
2398 }
2399 
2400 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2401 {
2402 	switch (cbs->flavor) {
2403 	case RPC_AUTH_NULL:
2404 	case RPC_AUTH_UNIX:
2405 		return nfs_ok;
2406 	default:
2407 		/*
2408 		 * GSS case: the spec doesn't allow us to return this
2409 		 * error.  But it also doesn't allow us not to support
2410 		 * GSS.
2411 		 * I'd rather this fail hard than return some error the
2412 		 * client might think it can already handle:
2413 		 */
2414 		return nfserr_encr_alg_unsupp;
2415 	}
2416 }
2417 
2418 __be32
2419 nfsd4_create_session(struct svc_rqst *rqstp,
2420 		     struct nfsd4_compound_state *cstate,
2421 		     struct nfsd4_create_session *cr_ses)
2422 {
2423 	struct sockaddr *sa = svc_addr(rqstp);
2424 	struct nfs4_client *conf, *unconf;
2425 	struct nfs4_client *old = NULL;
2426 	struct nfsd4_session *new;
2427 	struct nfsd4_conn *conn;
2428 	struct nfsd4_clid_slot *cs_slot = NULL;
2429 	__be32 status = 0;
2430 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2431 
2432 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2433 		return nfserr_inval;
2434 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2435 	if (status)
2436 		return status;
2437 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2438 	if (status)
2439 		return status;
2440 	status = check_backchannel_attrs(&cr_ses->back_channel);
2441 	if (status)
2442 		goto out_release_drc_mem;
2443 	status = nfserr_jukebox;
2444 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2445 	if (!new)
2446 		goto out_release_drc_mem;
2447 	conn = alloc_conn_from_crses(rqstp, cr_ses);
2448 	if (!conn)
2449 		goto out_free_session;
2450 
2451 	spin_lock(&nn->client_lock);
2452 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2453 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2454 	WARN_ON_ONCE(conf && unconf);
2455 
2456 	if (conf) {
2457 		status = nfserr_wrong_cred;
2458 		if (!mach_creds_match(conf, rqstp))
2459 			goto out_free_conn;
2460 		cs_slot = &conf->cl_cs_slot;
2461 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2462 		if (status == nfserr_replay_cache) {
2463 			status = nfsd4_replay_create_session(cr_ses, cs_slot);
2464 			goto out_free_conn;
2465 		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
2466 			status = nfserr_seq_misordered;
2467 			goto out_free_conn;
2468 		}
2469 	} else if (unconf) {
2470 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2471 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2472 			status = nfserr_clid_inuse;
2473 			goto out_free_conn;
2474 		}
2475 		status = nfserr_wrong_cred;
2476 		if (!mach_creds_match(unconf, rqstp))
2477 			goto out_free_conn;
2478 		cs_slot = &unconf->cl_cs_slot;
2479 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2480 		if (status) {
2481 			/* an unconfirmed replay returns misordered */
2482 			status = nfserr_seq_misordered;
2483 			goto out_free_conn;
2484 		}
2485 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2486 		if (old) {
2487 			status = mark_client_expired_locked(old);
2488 			if (status) {
2489 				old = NULL;
2490 				goto out_free_conn;
2491 			}
2492 		}
2493 		move_to_confirmed(unconf);
2494 		conf = unconf;
2495 	} else {
2496 		status = nfserr_stale_clientid;
2497 		goto out_free_conn;
2498 	}
2499 	status = nfs_ok;
2500 	/*
2501 	 * We do not support RDMA or persistent sessions
2502 	 */
2503 	cr_ses->flags &= ~SESSION4_PERSIST;
2504 	cr_ses->flags &= ~SESSION4_RDMA;
2505 
2506 	init_session(rqstp, new, conf, cr_ses);
2507 	nfsd4_get_session_locked(new);
2508 
2509 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2510 	       NFS4_MAX_SESSIONID_LEN);
2511 	cs_slot->sl_seqid++;
2512 	cr_ses->seqid = cs_slot->sl_seqid;
2513 
2514 	/* cache solo and embedded create sessions under the client_lock */
2515 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
2516 	spin_unlock(&nn->client_lock);
2517 	/* init connection and backchannel */
2518 	nfsd4_init_conn(rqstp, conn, new);
2519 	nfsd4_put_session(new);
2520 	if (old)
2521 		expire_client(old);
2522 	return status;
2523 out_free_conn:
2524 	spin_unlock(&nn->client_lock);
2525 	free_conn(conn);
2526 	if (old)
2527 		expire_client(old);
2528 out_free_session:
2529 	__free_session(new);
2530 out_release_drc_mem:
2531 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
2532 	return status;
2533 }
2534 
2535 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2536 {
2537 	switch (*dir) {
2538 	case NFS4_CDFC4_FORE:
2539 	case NFS4_CDFC4_BACK:
2540 		return nfs_ok;
2541 	case NFS4_CDFC4_FORE_OR_BOTH:
2542 	case NFS4_CDFC4_BACK_OR_BOTH:
2543 		*dir = NFS4_CDFC4_BOTH;
2544 		return nfs_ok;
2545 	}
2546 	return nfserr_inval;
2547 }
2548 
2549 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2550 {
2551 	struct nfsd4_session *session = cstate->session;
2552 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2553 	__be32 status;
2554 
2555 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2556 	if (status)
2557 		return status;
2558 	spin_lock(&nn->client_lock);
2559 	session->se_cb_prog = bc->bc_cb_program;
2560 	session->se_cb_sec = bc->bc_cb_sec;
2561 	spin_unlock(&nn->client_lock);
2562 
2563 	nfsd4_probe_callback(session->se_client);
2564 
2565 	return nfs_ok;
2566 }
2567 
2568 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2569 		     struct nfsd4_compound_state *cstate,
2570 		     struct nfsd4_bind_conn_to_session *bcts)
2571 {
2572 	__be32 status;
2573 	struct nfsd4_conn *conn;
2574 	struct nfsd4_session *session;
2575 	struct net *net = SVC_NET(rqstp);
2576 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2577 
2578 	if (!nfsd4_last_compound_op(rqstp))
2579 		return nfserr_not_only_op;
2580 	spin_lock(&nn->client_lock);
2581 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2582 	spin_unlock(&nn->client_lock);
2583 	if (!session)
2584 		goto out_no_session;
2585 	status = nfserr_wrong_cred;
2586 	if (!mach_creds_match(session->se_client, rqstp))
2587 		goto out;
2588 	status = nfsd4_map_bcts_dir(&bcts->dir);
2589 	if (status)
2590 		goto out;
2591 	conn = alloc_conn(rqstp, bcts->dir);
2592 	status = nfserr_jukebox;
2593 	if (!conn)
2594 		goto out;
2595 	nfsd4_init_conn(rqstp, conn, session);
2596 	status = nfs_ok;
2597 out:
2598 	nfsd4_put_session(session);
2599 out_no_session:
2600 	return status;
2601 }
2602 
2603 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2604 {
2605 	if (!session)
2606 		return false;
2607 	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2608 }
2609 
2610 __be32
2611 nfsd4_destroy_session(struct svc_rqst *r,
2612 		      struct nfsd4_compound_state *cstate,
2613 		      struct nfsd4_destroy_session *sessionid)
2614 {
2615 	struct nfsd4_session *ses;
2616 	__be32 status;
2617 	int ref_held_by_me = 0;
2618 	struct net *net = SVC_NET(r);
2619 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2620 
2621 	status = nfserr_not_only_op;
2622 	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2623 		if (!nfsd4_last_compound_op(r))
2624 			goto out;
2625 		ref_held_by_me++;
2626 	}
2627 	dump_sessionid(__func__, &sessionid->sessionid);
2628 	spin_lock(&nn->client_lock);
2629 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2630 	if (!ses)
2631 		goto out_client_lock;
2632 	status = nfserr_wrong_cred;
2633 	if (!mach_creds_match(ses->se_client, r))
2634 		goto out_put_session;
2635 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2636 	if (status)
2637 		goto out_put_session;
2638 	unhash_session(ses);
2639 	spin_unlock(&nn->client_lock);
2640 
2641 	nfsd4_probe_callback_sync(ses->se_client);
2642 
2643 	spin_lock(&nn->client_lock);
2644 	status = nfs_ok;
2645 out_put_session:
2646 	nfsd4_put_session_locked(ses);
2647 out_client_lock:
2648 	spin_unlock(&nn->client_lock);
2649 out:
2650 	return status;
2651 }
2652 
2653 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2654 {
2655 	struct nfsd4_conn *c;
2656 
2657 	list_for_each_entry(c, &s->se_conns, cn_persession) {
2658 		if (c->cn_xprt == xpt)
2659 			return c;
2661 	}
2662 	return NULL;
2663 }
2664 
2665 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2666 {
2667 	struct nfs4_client *clp = ses->se_client;
2668 	struct nfsd4_conn *c;
2669 	__be32 status = nfs_ok;
2670 	int ret;
2671 
2672 	spin_lock(&clp->cl_lock);
2673 	c = __nfsd4_find_conn(new->cn_xprt, ses);
2674 	if (c)
2675 		goto out_free;
2676 	status = nfserr_conn_not_bound_to_session;
2677 	if (clp->cl_mach_cred)
2678 		goto out_free;
2679 	__nfsd4_hash_conn(new, ses);
2680 	spin_unlock(&clp->cl_lock);
2681 	ret = nfsd4_register_conn(new);
2682 	if (ret)
2683 		/* oops; xprt is already down: */
2684 		nfsd4_conn_lost(&new->cn_xpt_user);
2685 	return nfs_ok;
2686 out_free:
2687 	spin_unlock(&clp->cl_lock);
2688 	free_conn(new);
2689 	return status;
2690 }
2691 
2692 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2693 {
2694 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
2695 
2696 	return args->opcnt > session->se_fchannel.maxops;
2697 }
2698 
2699 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2700 				  struct nfsd4_session *session)
2701 {
2702 	struct xdr_buf *xb = &rqstp->rq_arg;
2703 
2704 	return xb->len > session->se_fchannel.maxreq_sz;
2705 }
2706 
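/*
 * SEQUENCE, the mandatory first op of every 4.1+ compound: binds the
 * compound to a session and slot, enforces the slot seqid rules
 * (including replays from the DRC), and restricts the reply buffer to
 * what the slot can cache when the client requested caching.
 */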
2707 __be32
2708 nfsd4_sequence(struct svc_rqst *rqstp,
2709 	       struct nfsd4_compound_state *cstate,
2710 	       struct nfsd4_sequence *seq)
2711 {
2712 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
2713 	struct xdr_stream *xdr = &resp->xdr;
2714 	struct nfsd4_session *session;
2715 	struct nfs4_client *clp;
2716 	struct nfsd4_slot *slot;
2717 	struct nfsd4_conn *conn;
2718 	__be32 status;
2719 	int buflen;
2720 	struct net *net = SVC_NET(rqstp);
2721 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2722 
2723 	if (resp->opcnt != 1)
2724 		return nfserr_sequence_pos;
2725 
2726 	/*
2727 	 * Will be either used or freed by nfsd4_sequence_check_conn
2728 	 * below.
2729 	 */
2730 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2731 	if (!conn)
2732 		return nfserr_jukebox;
2733 
2734 	spin_lock(&nn->client_lock);
2735 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
2736 	if (!session)
2737 		goto out_no_session;
2738 	clp = session->se_client;
2739 
2740 	status = nfserr_too_many_ops;
2741 	if (nfsd4_session_too_many_ops(rqstp, session))
2742 		goto out_put_session;
2743 
2744 	status = nfserr_req_too_big;
2745 	if (nfsd4_request_too_big(rqstp, session))
2746 		goto out_put_session;
2747 
2748 	status = nfserr_badslot;
2749 	if (seq->slotid >= session->se_fchannel.maxreqs)
2750 		goto out_put_session;
2751 
2752 	slot = session->se_slots[seq->slotid];
2753 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
2754 
2755 	/* We do not negotiate the number of slots yet, so set the
2756 	 * maxslots to the session maxreqs, which is used to encode
2757 	 * sr_highest_slotid and the sr_target_slotid to maxslots */
2758 	seq->maxslots = session->se_fchannel.maxreqs;
2759 
2760 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2761 					slot->sl_flags & NFSD4_SLOT_INUSE);
2762 	if (status == nfserr_replay_cache) {
2763 		status = nfserr_seq_misordered;
2764 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2765 			goto out_put_session;
2766 		cstate->slot = slot;
2767 		cstate->session = session;
2768 		cstate->clp = clp;
2769 		/* Return the cached reply status and set cstate->status
2770 		 * for nfsd4_proc_compound processing */
2771 		status = nfsd4_replay_cache_entry(resp, seq);
2772 		cstate->status = nfserr_replay_cache;
2773 		goto out;
2774 	}
2775 	if (status)
2776 		goto out_put_session;
2777 
2778 	status = nfsd4_sequence_check_conn(conn, session);
2779 	conn = NULL;
2780 	if (status)
2781 		goto out_put_session;
2782 
2783 	buflen = (seq->cachethis) ?
2784 			session->se_fchannel.maxresp_cached :
2785 			session->se_fchannel.maxresp_sz;
2786 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
2787 				    nfserr_rep_too_big;
2788 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
2789 		goto out_put_session;
2790 	svc_reserve(rqstp, buflen);
2791 
2792 	status = nfs_ok;
2793 	/* Success! bump slot seqid */
2794 	slot->sl_seqid = seq->seqid;
2795 	slot->sl_flags |= NFSD4_SLOT_INUSE;
2796 	if (seq->cachethis)
2797 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2798 	else
2799 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2800 
2801 	cstate->slot = slot;
2802 	cstate->session = session;
2803 	cstate->clp = clp;
2804 
2805 out:
2806 	switch (clp->cl_cb_state) {
2807 	case NFSD4_CB_DOWN:
2808 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2809 		break;
2810 	case NFSD4_CB_FAULT:
2811 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2812 		break;
2813 	default:
2814 		seq->status_flags = 0;
2815 	}
2816 	if (!list_empty(&clp->cl_revoked))
2817 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2818 out_no_session:
2819 	if (conn)
2820 		free_conn(conn);
2821 	spin_unlock(&nn->client_lock);
2822 	return status;
2823 out_put_session:
2824 	nfsd4_put_session_locked(session);
2825 	goto out_no_session;
2826 }
2827 
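/*
 * Called when compound processing is complete: cache the reply in the
 * session's DRC slot (unless we were replaying from it), mark the slot
 * no longer in use, and drop the references taken in nfsd4_sequence().
 */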
2828 void
2829 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2830 {
2831 	struct nfsd4_compound_state *cs = &resp->cstate;
2832 
2833 	if (nfsd4_has_session(cs)) {
2834 		if (cs->status != nfserr_replay_cache) {
2835 			nfsd4_store_cache_entry(resp);
2836 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2837 		}
2838 		/* Drop session reference that was taken in nfsd4_sequence() */
2839 		nfsd4_put_session(cs->session);
2840 	} else if (cs->clp)
2841 		put_client_renew(cs->clp);
2842 }
2843 
2844 __be32
2845 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2846 {
2847 	struct nfs4_client *conf, *unconf;
2848 	struct nfs4_client *clp = NULL;
2849 	__be32 status = 0;
2850 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2851 
2852 	spin_lock(&nn->client_lock);
2853 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2854 	conf = find_confirmed_client(&dc->clientid, true, nn);
2855 	WARN_ON_ONCE(conf && unconf);
2856 
2857 	if (conf) {
2858 		if (client_has_state(conf)) {
2859 			status = nfserr_clientid_busy;
2860 			goto out;
2861 		}
2862 		status = mark_client_expired_locked(conf);
2863 		if (status)
2864 			goto out;
2865 		clp = conf;
2866 	} else if (unconf)
2867 		clp = unconf;
2868 	else {
2869 		status = nfserr_stale_clientid;
2870 		goto out;
2871 	}
2872 	if (!mach_creds_match(clp, rqstp)) {
2873 		clp = NULL;
2874 		status = nfserr_wrong_cred;
2875 		goto out;
2876 	}
2877 	unhash_client_locked(clp);
2878 out:
2879 	spin_unlock(&nn->client_lock);
2880 	if (clp)
2881 		expire_client(clp);
2882 	return status;
2883 }
2884 
2885 __be32
2886 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2887 {
2888 	__be32 status = 0;
2889 
2890 	if (rc->rca_one_fs) {
2891 		if (!cstate->current_fh.fh_dentry)
2892 			return nfserr_nofilehandle;
2893 		/*
2894 		 * We don't take advantage of the rca_one_fs case.
2895 		 * That's OK, it's optional, we can safely ignore it.
2896 		 */
2897 		return nfs_ok;
2898 	}
2899 
2900 	status = nfserr_complete_already;
2901 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2902 			     &cstate->session->se_client->cl_flags))
2903 		goto out;
2904 
2905 	status = nfserr_stale_clientid;
2906 	if (is_client_expired(cstate->session->se_client))
2907 		/*
2908 		 * The following error isn't really legal.
2909 		 * But we only get here if the client has just explicitly
2910 		 * destroyed its own clientid.  Surely it no longer cares
2911 		 * what error it gets back on an operation for the dead
2912 		 * client.
2913 		 */
2914 		goto out;
2915 
2916 	status = nfs_ok;
2917 	nfsd4_client_record_create(cstate->session->se_client);
2918 out:
2919 	return status;
2920 }
2921 
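/*
 * SETCLIENTID: the v4.0 client-establishment op.  Builds an unconfirmed
 * client record carrying the callback information; the record only
 * becomes usable once the client returns the confirm verifier via
 * SETCLIENTID_CONFIRM.
 */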
2922 __be32
2923 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2924 		  struct nfsd4_setclientid *setclid)
2925 {
2926 	struct xdr_netobj 	clname = setclid->se_name;
2927 	nfs4_verifier		clverifier = setclid->se_verf;
2928 	struct nfs4_client	*conf, *new;
2929 	struct nfs4_client	*unconf = NULL;
2930 	__be32 			status;
2931 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2932 
2933 	new = create_client(clname, rqstp, &clverifier);
2934 	if (new == NULL)
2935 		return nfserr_jukebox;
2936 	/* Cases below refer to rfc 3530 section 14.2.33: */
2937 	spin_lock(&nn->client_lock);
2938 	conf = find_confirmed_client_by_name(&clname, nn);
2939 	if (conf) {
2940 		/* case 0: */
2941 		status = nfserr_clid_inuse;
2942 		if (clp_used_exchangeid(conf))
2943 			goto out;
2944 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2945 			char addr_str[INET6_ADDRSTRLEN];
2946 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2947 				 sizeof(addr_str));
2948 			dprintk("NFSD: setclientid: string in use by client "
2949 				"at %s\n", addr_str);
2950 			goto out;
2951 		}
2952 	}
2953 	unconf = find_unconfirmed_client_by_name(&clname, nn);
2954 	if (unconf)
2955 		unhash_client_locked(unconf);
2956 	if (conf && same_verf(&conf->cl_verifier, &clverifier))
2957 		/* case 1: probable callback update */
2958 		copy_clid(new, conf);
2959 	else /* case 4 (new client) or cases 2, 3 (client reboot): */
2960 		gen_clid(new, nn);
2961 	new->cl_minorversion = 0;
2962 	gen_callback(new, setclid, rqstp);
2963 	add_to_unconfirmed(new);
2964 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2965 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2966 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2967 	new = NULL;
2968 	status = nfs_ok;
2969 out:
2970 	spin_unlock(&nn->client_lock);
2971 	if (new)
2972 		free_client(new);
2973 	if (unconf)
2974 		expire_client(unconf);
2975 	return status;
2976 }
2977 
2978 
2979 __be32
2980 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2981 			 struct nfsd4_compound_state *cstate,
2982 			 struct nfsd4_setclientid_confirm *setclientid_confirm)
2983 {
2984 	struct nfs4_client *conf, *unconf;
2985 	struct nfs4_client *old = NULL;
2986 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2987 	clientid_t * clid = &setclientid_confirm->sc_clientid;
2988 	__be32 status;
2989 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2990 
2991 	if (STALE_CLIENTID(clid, nn))
2992 		return nfserr_stale_clientid;
2993 
2994 	spin_lock(&nn->client_lock);
2995 	conf = find_confirmed_client(clid, false, nn);
2996 	unconf = find_unconfirmed_client(clid, false, nn);
2997 	/*
2998 	 * We try hard to give out unique clientid's, so if we get an
2999 	 * attempt to confirm the same clientid with a different cred,
3000 	 * there's a bug somewhere.  Let's charitably assume it's our
3001 	 * bug.
3002 	 */
3003 	status = nfserr_serverfault;
3004 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3005 		goto out;
3006 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3007 		goto out;
3008 	/* cases below refer to rfc 3530 section 14.2.34: */
3009 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3010 		if (conf && !unconf) /* case 2: probable retransmit */
3011 			status = nfs_ok;
3012 		else /* case 4: client hasn't noticed we rebooted yet? */
3013 			status = nfserr_stale_clientid;
3014 		goto out;
3015 	}
3016 	status = nfs_ok;
3017 	if (conf) { /* case 1: callback update */
3018 		old = unconf;
3019 		unhash_client_locked(old);
3020 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3021 	} else { /* case 3: normal case; new or rebooted client */
3022 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3023 		if (old) {
3024 			status = mark_client_expired_locked(old);
3025 			if (status) {
3026 				old = NULL;
3027 				goto out;
3028 			}
3029 		}
3030 		move_to_confirmed(unconf);
3031 		conf = unconf;
3032 	}
3033 	get_client_locked(conf);
3034 	spin_unlock(&nn->client_lock);
3035 	nfsd4_probe_callback(conf);
3036 	spin_lock(&nn->client_lock);
3037 	put_client_renew_locked(conf);
3038 out:
3039 	spin_unlock(&nn->client_lock);
3040 	if (old)
3041 		expire_client(old);
3042 	return status;
3043 }
3044 
3045 static struct nfs4_file *nfsd4_alloc_file(void)
3046 {
3047 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
3048 }
3049 
3050 /* OPEN Share state helper functions */
3051 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3052 				struct nfs4_file *fp)
3053 {
3054 	lockdep_assert_held(&state_lock);
3055 
3056 	atomic_set(&fp->fi_ref, 1);
3057 	spin_lock_init(&fp->fi_lock);
3058 	INIT_LIST_HEAD(&fp->fi_stateids);
3059 	INIT_LIST_HEAD(&fp->fi_delegations);
3060 	fh_copy_shallow(&fp->fi_fhandle, fh);
3061 	fp->fi_deleg_file = NULL;
3062 	fp->fi_had_conflict = false;
3063 	fp->fi_share_deny = 0;
3064 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3065 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
3066 #ifdef CONFIG_NFSD_PNFS
3067 	INIT_LIST_HEAD(&fp->fi_lo_states);
3068 	atomic_set(&fp->fi_lo_recalls, 0);
3069 #endif
3070 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3071 }
3072 
3073 void
3074 nfsd4_free_slabs(void)
3075 {
3076 	kmem_cache_destroy(openowner_slab);
3077 	kmem_cache_destroy(lockowner_slab);
3078 	kmem_cache_destroy(file_slab);
3079 	kmem_cache_destroy(stateid_slab);
3080 	kmem_cache_destroy(deleg_slab);
3081 }
3082 
3083 int
3084 nfsd4_init_slabs(void)
3085 {
3086 	openowner_slab = kmem_cache_create("nfsd4_openowners",
3087 			sizeof(struct nfs4_openowner), 0, 0, NULL);
3088 	if (openowner_slab == NULL)
3089 		goto out;
3090 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3091 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
3092 	if (lockowner_slab == NULL)
3093 		goto out_free_openowner_slab;
3094 	file_slab = kmem_cache_create("nfsd4_files",
3095 			sizeof(struct nfs4_file), 0, 0, NULL);
3096 	if (file_slab == NULL)
3097 		goto out_free_lockowner_slab;
3098 	stateid_slab = kmem_cache_create("nfsd4_stateids",
3099 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3100 	if (stateid_slab == NULL)
3101 		goto out_free_file_slab;
3102 	deleg_slab = kmem_cache_create("nfsd4_delegations",
3103 			sizeof(struct nfs4_delegation), 0, 0, NULL);
3104 	if (deleg_slab == NULL)
3105 		goto out_free_stateid_slab;
3106 	return 0;
3107 
3108 out_free_stateid_slab:
3109 	kmem_cache_destroy(stateid_slab);
3110 out_free_file_slab:
3111 	kmem_cache_destroy(file_slab);
3112 out_free_lockowner_slab:
3113 	kmem_cache_destroy(lockowner_slab);
3114 out_free_openowner_slab:
3115 	kmem_cache_destroy(openowner_slab);
3116 out:
3117 	dprintk("nfsd4: out of memory while initializing nfsv4\n");
3118 	return -ENOMEM;
3119 }
3120 
3121 static void init_nfs4_replay(struct nfs4_replay *rp)
3122 {
3123 	rp->rp_status = nfserr_serverfault;
3124 	rp->rp_buflen = 0;
3125 	rp->rp_buf = rp->rp_ibuf;
3126 	mutex_init(&rp->rp_mutex);
3127 }
3128 
3129 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3130 		struct nfs4_stateowner *so)
3131 {
3132 	if (!nfsd4_has_session(cstate)) {
3133 		mutex_lock(&so->so_replay.rp_mutex);
3134 		cstate->replay_owner = nfs4_get_stateowner(so);
3135 	}
3136 }
3137 
3138 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3139 {
3140 	struct nfs4_stateowner *so = cstate->replay_owner;
3141 
3142 	if (so != NULL) {
3143 		cstate->replay_owner = NULL;
3144 		mutex_unlock(&so->so_replay.rp_mutex);
3145 		nfs4_put_stateowner(so);
3146 	}
3147 }
3148 
3149 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3150 {
3151 	struct nfs4_stateowner *sop;
3152 
3153 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
3154 	if (!sop)
3155 		return NULL;
3156 
3157 	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3158 	if (!sop->so_owner.data) {
3159 		kmem_cache_free(slab, sop);
3160 		return NULL;
3161 	}
3162 	sop->so_owner.len = owner->len;
3163 
3164 	INIT_LIST_HEAD(&sop->so_stateids);
3165 	sop->so_client = clp;
3166 	init_nfs4_replay(&sop->so_replay);
3167 	atomic_set(&sop->so_count, 1);
3168 	return sop;
3169 }
3170 
3171 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3172 {
3173 	lockdep_assert_held(&clp->cl_lock);
3174 
3175 	list_add(&oo->oo_owner.so_strhash,
3176 		 &clp->cl_ownerstr_hashtbl[strhashval]);
3177 	list_add(&oo->oo_perclient, &clp->cl_openowners);
3178 }
3179 
3180 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3181 {
3182 	unhash_openowner_locked(openowner(so));
3183 }
3184 
3185 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3186 {
3187 	struct nfs4_openowner *oo = openowner(so);
3188 
3189 	kmem_cache_free(openowner_slab, oo);
3190 }
3191 
3192 static const struct nfs4_stateowner_operations openowner_ops = {
3193 	.so_unhash =	nfs4_unhash_openowner,
3194 	.so_free =	nfs4_free_openowner,
3195 };
3196 
3197 static struct nfs4_openowner *
3198 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3199 			   struct nfsd4_compound_state *cstate)
3200 {
3201 	struct nfs4_client *clp = cstate->clp;
3202 	struct nfs4_openowner *oo, *ret;
3203 
3204 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3205 	if (!oo)
3206 		return NULL;
3207 	oo->oo_owner.so_ops = &openowner_ops;
3208 	oo->oo_owner.so_is_open_owner = 1;
3209 	oo->oo_owner.so_seqid = open->op_seqid;
3210 	oo->oo_flags = 0;
3211 	if (nfsd4_has_session(cstate))
3212 		oo->oo_flags |= NFS4_OO_CONFIRMED;
3213 	oo->oo_time = 0;
3214 	oo->oo_last_closed_stid = NULL;
3215 	INIT_LIST_HEAD(&oo->oo_close_lru);
3216 	spin_lock(&clp->cl_lock);
3217 	ret = find_openstateowner_str_locked(strhashval, open, clp);
3218 	if (ret == NULL) {
3219 		hash_openowner(oo, clp, strhashval);
3220 		ret = oo;
3221 	} else
3222 		nfs4_free_openowner(&oo->oo_owner);
3223 	spin_unlock(&clp->cl_lock);
3224 	return ret;
3225 }
3226 
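/*
 * Attach a freshly allocated open stateid to its owner and file.  Note
 * the nesting used here: fi_lock is taken inside cl_lock.
 */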
3227 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
3228 	struct nfs4_openowner *oo = open->op_openowner;
3229 
3230 	atomic_inc(&stp->st_stid.sc_count);
3231 	stp->st_stid.sc_type = NFS4_OPEN_STID;
3232 	INIT_LIST_HEAD(&stp->st_locks);
3233 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3234 	get_nfs4_file(fp);
3235 	stp->st_stid.sc_file = fp;
3236 	stp->st_access_bmap = 0;
3237 	stp->st_deny_bmap = 0;
3238 	stp->st_openstp = NULL;
3239 	spin_lock(&oo->oo_owner.so_client->cl_lock);
3240 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3241 	spin_lock(&fp->fi_lock);
3242 	list_add(&stp->st_perfile, &fp->fi_stateids);
3243 	spin_unlock(&fp->fi_lock);
3244 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
3245 }
3246 
3247 /*
3248  * In the 4.0 case we need to keep the owners around a little while to handle
3249  * CLOSE replay. We still need to release any file access held by them
3250  * before returning, however.
3251  */
3252 static void
3253 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3254 {
3255 	struct nfs4_ol_stateid *last;
3256 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
3257 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3258 						nfsd_net_id);
3259 
3260 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3261 
3262 	/*
3263 	 * We know that we hold one reference via nfsd4_close, and another
3264 	 * "persistent" reference for the client. If the refcount is higher
3265 	 * than 2, then there are still calls in progress that are using this
3266 	 * stateid. We can't put the sc_file reference until they are finished.
3267 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
3268 	 * there should be no danger of the refcount going back up again at
3269 	 * this point.
3270 	 */
3271 	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3272 
3273 	release_all_access(s);
3274 	if (s->st_stid.sc_file) {
3275 		put_nfs4_file(s->st_stid.sc_file);
3276 		s->st_stid.sc_file = NULL;
3277 	}
3278 
3279 	spin_lock(&nn->client_lock);
3280 	last = oo->oo_last_closed_stid;
3281 	oo->oo_last_closed_stid = s;
3282 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3283 	oo->oo_time = get_seconds();
3284 	spin_unlock(&nn->client_lock);
3285 	if (last)
3286 		nfs4_put_stid(&last->st_stid);
3287 }
3288 
3289 /* search file_hashtbl[] for file */
3290 static struct nfs4_file *
3291 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3292 {
3293 	struct nfs4_file *fp;
3294 
3295 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3296 		if (fh_match(&fp->fi_fhandle, fh)) {
3297 			if (atomic_inc_not_zero(&fp->fi_ref))
3298 				return fp;
3299 		}
3300 	}
3301 	return NULL;
3302 }
3303 
3304 struct nfs4_file *
3305 find_file(struct knfsd_fh *fh)
3306 {
3307 	struct nfs4_file *fp;
3308 	unsigned int hashval = file_hashval(fh);
3309 
3310 	rcu_read_lock();
3311 	fp = find_file_locked(fh, hashval);
3312 	rcu_read_unlock();
3313 	return fp;
3314 }
3315 
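/*
 * Double-checked lookup: try a lockless RCU search first, then, on a
 * miss, retry the search under state_lock before inserting the
 * caller-provided entry, so that two racing opens cannot hash the same
 * filehandle twice.
 */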
3316 static struct nfs4_file *
3317 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3318 {
3319 	struct nfs4_file *fp;
3320 	unsigned int hashval = file_hashval(fh);
3321 
3322 	rcu_read_lock();
3323 	fp = find_file_locked(fh, hashval);
3324 	rcu_read_unlock();
3325 	if (fp)
3326 		return fp;
3327 
3328 	spin_lock(&state_lock);
3329 	fp = find_file_locked(fh, hashval);
3330 	if (likely(fp == NULL)) {
3331 		nfsd4_init_file(fh, hashval, new);
3332 		fp = new;
3333 	}
3334 	spin_unlock(&state_lock);
3335 
3336 	return fp;
3337 }
3338 
3339 /*
3340  * Called to check for a conflicting deny mode when a READ arrives with
3341  * the special all-zero stateid, or a WRITE with the all-zero or
3342  * all-ones stateid.
3342  */
3343 static __be32
3344 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3345 {
3346 	struct nfs4_file *fp;
3347 	__be32 ret = nfs_ok;
3348 
3349 	fp = find_file(&current_fh->fh_handle);
3350 	if (!fp)
3351 		return ret;
3352 	/* Check for conflicting share reservations */
3353 	spin_lock(&fp->fi_lock);
3354 	if (fp->fi_share_deny & deny_type)
3355 		ret = nfserr_locked;
3356 	spin_unlock(&fp->fi_lock);
3357 	put_nfs4_file(fp);
3358 	return ret;
3359 }
3360 
3361 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3362 {
3363 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3364 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3365 					  nfsd_net_id);
3366 
3367 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3368 
3369 	/*
3370 	 * We can't do this in nfsd_break_deleg_cb because it is
3371 	 * already holding inode->i_lock.
3372 	 *
3373 	 * If the dl_time != 0, then we know that it has already been
3374 	 * queued for a lease break. Don't queue it again.
3375 	 */
3376 	spin_lock(&state_lock);
3377 	if (dp->dl_time == 0) {
3378 		dp->dl_time = get_seconds();
3379 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3380 	}
3381 	spin_unlock(&state_lock);
3382 }
3383 
3384 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3385 		struct rpc_task *task)
3386 {
3387 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3388 
3389 	switch (task->tk_status) {
3390 	case 0:
3391 		return 1;
3392 	case -EBADHANDLE:
3393 	case -NFS4ERR_BAD_STATEID:
3394 		/*
3395 		 * Race: client probably got cb_recall before open reply
3396 		 * granting delegation.
3397 		 */
3398 		if (dp->dl_retries--) {
3399 			rpc_delay(task, 2 * HZ);
3400 			return 0;
3401 		}
3402 		/*FALLTHRU*/
3403 	default:
3404 		return -1;
3405 	}
3406 }
3407 
3408 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3409 {
3410 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3411 
3412 	nfs4_put_stid(&dp->dl_stid);
3413 }
3414 
3415 static struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3416 	.prepare	= nfsd4_cb_recall_prepare,
3417 	.done		= nfsd4_cb_recall_done,
3418 	.release	= nfsd4_cb_recall_release,
3419 };
3420 
3421 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3422 {
3423 	/*
3424 	 * We're assuming the state code never drops its reference
3425 	 * without first removing the lease.  Since we're in this lease
3426 	 * callback (and since the lease code is serialized by the kernel
3427 	 * lock) we know the server hasn't removed the lease yet, so it's
3428 	 * safe to take a reference.
3429 	 */
3430 	atomic_inc(&dp->dl_stid.sc_count);
3431 	nfsd4_run_cb(&dp->dl_recall);
3432 }
3433 
3434 /* Called from break_lease() with i_lock held. */
3435 static bool
3436 nfsd_break_deleg_cb(struct file_lock *fl)
3437 {
3438 	bool ret = false;
3439 	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3440 	struct nfs4_delegation *dp;
3441 
3442 	if (!fp) {
3443 		WARN(1, "(%p)->fl_owner NULL\n", fl);
3444 		return ret;
3445 	}
3446 	if (fp->fi_had_conflict) {
3447 		WARN(1, "duplicate break on %p\n", fp);
3448 		return ret;
3449 	}
3450 	/*
3451 	 * We don't want the locks code to timeout the lease for us;
3452 	 * we'll remove it ourselves if a delegation isn't returned
3453 	 * in time:
3454 	 */
3455 	fl->fl_break_time = 0;
3456 
3457 	spin_lock(&fp->fi_lock);
3458 	fp->fi_had_conflict = true;
3459 	/*
3460 	 * If there are no delegations on the list, then return true
3461 	 * so that the lease code will go ahead and delete it.
3462 	 */
3463 	if (list_empty(&fp->fi_delegations))
3464 		ret = true;
3465 	else
3466 		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3467 			nfsd_break_one_deleg(dp);
3468 	spin_unlock(&fp->fi_lock);
3469 	return ret;
3470 }
3471 
3472 static int
3473 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3474 		     struct list_head *dispose)
3475 {
3476 	if (arg & F_UNLCK)
3477 		return lease_modify(onlist, arg, dispose);
3478 	else
3479 		return -EAGAIN;
3480 }
3481 
3482 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3483 	.lm_break = nfsd_break_deleg_cb,
3484 	.lm_change = nfsd_change_deleg_cb,
3485 };
3486 
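/*
 * 4.0 seqid discipline: so_seqid is the value expected next from this
 * owner.  A request exactly one behind is a replay to be answered from
 * the owner's replay cache; anything else is a bad seqid.  Sessions
 * (4.1+) replace this mechanism, so such compounds pass trivially.
 */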
3487 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3488 {
3489 	if (nfsd4_has_session(cstate))
3490 		return nfs_ok;
3491 	if (seqid == so->so_seqid - 1)
3492 		return nfserr_replay_me;
3493 	if (seqid == so->so_seqid)
3494 		return nfs_ok;
3495 	return nfserr_bad_seqid;
3496 }
3497 
3498 static __be32 lookup_clientid(clientid_t *clid,
3499 		struct nfsd4_compound_state *cstate,
3500 		struct nfsd_net *nn)
3501 {
3502 	struct nfs4_client *found;
3503 
3504 	if (cstate->clp) {
3505 		found = cstate->clp;
3506 		if (!same_clid(&found->cl_clientid, clid))
3507 			return nfserr_stale_clientid;
3508 		return nfs_ok;
3509 	}
3510 
3511 	if (STALE_CLIENTID(clid, nn))
3512 		return nfserr_stale_clientid;
3513 
3514 	/*
3515 	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3516 	 * cached already then we know this is for v4.0 and "sessions"
3517 	 * will be false.
3518 	 */
3519 	WARN_ON_ONCE(cstate->session);
3520 	spin_lock(&nn->client_lock);
3521 	found = find_confirmed_client(clid, false, nn);
3522 	if (!found) {
3523 		spin_unlock(&nn->client_lock);
3524 		return nfserr_expired;
3525 	}
3526 	atomic_inc(&found->cl_refcount);
3527 	spin_unlock(&nn->client_lock);
3528 
3529 	/* Cache the nfs4_client in cstate! */
3530 	cstate->clp = found;
3531 	return nfs_ok;
3532 }
3533 
3534 __be32
3535 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3536 		    struct nfsd4_open *open, struct nfsd_net *nn)
3537 {
3538 	clientid_t *clientid = &open->op_clientid;
3539 	struct nfs4_client *clp = NULL;
3540 	unsigned int strhashval;
3541 	struct nfs4_openowner *oo = NULL;
3542 	__be32 status;
3543 
3544 	if (STALE_CLIENTID(&open->op_clientid, nn))
3545 		return nfserr_stale_clientid;
3546 	/*
3547 	 * In case we need it later, after we've already created the
3548 	 * file and don't want to risk a further failure:
3549 	 */
3550 	open->op_file = nfsd4_alloc_file();
3551 	if (open->op_file == NULL)
3552 		return nfserr_jukebox;
3553 
3554 	status = lookup_clientid(clientid, cstate, nn);
3555 	if (status)
3556 		return status;
3557 	clp = cstate->clp;
3558 
3559 	strhashval = ownerstr_hashval(&open->op_owner);
3560 	oo = find_openstateowner_str(strhashval, open, clp);
3561 	open->op_openowner = oo;
3562 	if (!oo)
3563 		goto new_owner;
3565 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
3566 		/* Replace unconfirmed owners without checking for replay. */
3567 		release_openowner(oo);
3568 		open->op_openowner = NULL;
3569 		goto new_owner;
3570 	}
3571 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
3572 	if (status)
3573 		return status;
3574 	goto alloc_stateid;
3575 new_owner:
3576 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
3577 	if (oo == NULL)
3578 		return nfserr_jukebox;
3579 	open->op_openowner = oo;
3580 alloc_stateid:
3581 	open->op_stp = nfs4_alloc_open_stateid(clp);
3582 	if (!open->op_stp)
3583 		return nfserr_jukebox;
3584 	return nfs_ok;
3585 }
3586 
3587 static inline __be32
3588 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3589 {
3590 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3591 		return nfserr_openmode;
3592 	else
3593 		return nfs_ok;
3594 }
3595 
3596 static int share_access_to_flags(u32 share_access)
3597 {
3598 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3599 }
3600 
3601 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3602 {
3603 	struct nfs4_stid *ret;
3604 
3605 	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3606 	if (!ret)
3607 		return NULL;
3608 	return delegstateid(ret);
3609 }
3610 
3611 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3612 {
3613 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3614 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3615 }
3616 
3617 static __be32
3618 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3619 		struct nfs4_delegation **dp)
3620 {
3621 	int flags;
3622 	__be32 status = nfserr_bad_stateid;
3623 	struct nfs4_delegation *deleg;
3624 
3625 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
3626 	if (deleg == NULL)
3627 		goto out;
3628 	flags = share_access_to_flags(open->op_share_access);
3629 	status = nfs4_check_delegmode(deleg, flags);
3630 	if (status) {
3631 		nfs4_put_stid(&deleg->dl_stid);
3632 		goto out;
3633 	}
3634 	*dp = deleg;
3635 out:
3636 	if (!nfsd4_is_deleg_cur(open))
3637 		return nfs_ok;
3638 	if (status)
3639 		return status;
3640 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3641 	return nfs_ok;
3642 }
3643 
3644 static struct nfs4_ol_stateid *
3645 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3646 {
3647 	struct nfs4_ol_stateid *local, *ret = NULL;
3648 	struct nfs4_openowner *oo = open->op_openowner;
3649 
3650 	spin_lock(&fp->fi_lock);
3651 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3652 		/* ignore lock owners */
3653 		if (local->st_stateowner->so_is_open_owner == 0)
3654 			continue;
3655 		if (local->st_stateowner == &oo->oo_owner) {
3656 			ret = local;
3657 			atomic_inc(&ret->st_stid.sc_count);
3658 			break;
3659 		}
3660 	}
3661 	spin_unlock(&fp->fi_lock);
3662 	return ret;
3663 }
3664 
3665 static inline int nfs4_access_to_access(u32 nfs4_access)
3666 {
3667 	int flags = 0;
3668 
3669 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3670 		flags |= NFSD_MAY_READ;
3671 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3672 		flags |= NFSD_MAY_WRITE;
3673 	return flags;
3674 }
3675 
3676 static inline __be32
3677 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3678 		struct nfsd4_open *open)
3679 {
3680 	struct iattr iattr = {
3681 		.ia_valid = ATTR_SIZE,
3682 		.ia_size = 0,
3683 	};
3684 	if (!open->op_truncate)
3685 		return 0;
3686 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3687 		return nfserr_inval;
3688 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3689 }
3690 
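/*
 * Record the requested share access and deny bits in the stateid and
 * the nfs4_file, opening a struct file with a matching mode if the
 * file doesn't already have one cached.  On failure, the bookkeeping
 * is rolled back via out_put_access.
 */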
3691 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3692 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
3693 		struct nfsd4_open *open)
3694 {
3695 	struct file *filp = NULL;
3696 	__be32 status;
3697 	int oflag = nfs4_access_to_omode(open->op_share_access);
3698 	int access = nfs4_access_to_access(open->op_share_access);
3699 	unsigned char old_access_bmap, old_deny_bmap;
3700 
3701 	spin_lock(&fp->fi_lock);
3702 
3703 	/*
3704 	 * Are we trying to set a deny mode that would conflict with
3705 	 * current access?
3706 	 */
3707 	status = nfs4_file_check_deny(fp, open->op_share_deny);
3708 	if (status != nfs_ok) {
3709 		spin_unlock(&fp->fi_lock);
3710 		goto out;
3711 	}
3712 
3713 	/* set access to the file */
3714 	status = nfs4_file_get_access(fp, open->op_share_access);
3715 	if (status != nfs_ok) {
3716 		spin_unlock(&fp->fi_lock);
3717 		goto out;
3718 	}
3719 
3720 	/* Set access bits in stateid */
3721 	old_access_bmap = stp->st_access_bmap;
3722 	set_access(open->op_share_access, stp);
3723 
3724 	/* Set new deny mask */
3725 	old_deny_bmap = stp->st_deny_bmap;
3726 	set_deny(open->op_share_deny, stp);
3727 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3728 
3729 	if (!fp->fi_fds[oflag]) {
3730 		spin_unlock(&fp->fi_lock);
3731 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
3732 		if (status)
3733 			goto out_put_access;
3734 		spin_lock(&fp->fi_lock);
3735 		if (!fp->fi_fds[oflag]) {
3736 			fp->fi_fds[oflag] = filp;
3737 			filp = NULL;
3738 		}
3739 	}
3740 	spin_unlock(&fp->fi_lock);
3741 	if (filp)
3742 		fput(filp);
3743 
3744 	status = nfsd4_truncate(rqstp, cur_fh, open);
3745 	if (status)
3746 		goto out_put_access;
3747 out:
3748 	return status;
3749 out_put_access:
3750 	stp->st_access_bmap = old_access_bmap;
3751 	nfs4_file_put_access(fp, open->op_share_access);
3752 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
3753 	goto out;
3754 }
3755 
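/*
 * OPEN upgrade: add any newly requested access via nfs4_get_vfs_file,
 * or, if the access bits are already held, just extend the deny mode,
 * undoing the deny change if the implied truncate fails.
 */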
3756 static __be32
3757 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
3758 {
3759 	__be32 status;
3760 	unsigned char old_deny_bmap;
3761 
3762 	if (!test_access(open->op_share_access, stp))
3763 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
3764 
3765 	/* test and set deny mode */
3766 	spin_lock(&fp->fi_lock);
3767 	status = nfs4_file_check_deny(fp, open->op_share_deny);
3768 	if (status == nfs_ok) {
3769 		old_deny_bmap = stp->st_deny_bmap;
3770 		set_deny(open->op_share_deny, stp);
3771 		fp->fi_share_deny |=
3772 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3773 	}
3774 	spin_unlock(&fp->fi_lock);
3775 
3776 	if (status != nfs_ok)
3777 		return status;
3778 
3779 	status = nfsd4_truncate(rqstp, cur_fh, open);
3780 	if (status != nfs_ok)
3781 		reset_union_bmap_deny(old_deny_bmap, stp);
3782 	return status;
3783 }
3784 
3785 static void
3786 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
3787 {
3788 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3789 }
3790 
3791 /* Should we give out recallable state? */
3792 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3793 {
3794 	if (clp->cl_cb_state == NFSD4_CB_UP)
3795 		return true;
3796 	/*
3797 	 * In the sessions case, since we don't have to establish a
3798 	 * separate connection for callbacks, we assume it's OK
3799 	 * until we hear otherwise:
3800 	 */
3801 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3802 }
3803 
3804 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
3805 {
3806 	struct file_lock *fl;
3807 
3808 	fl = locks_alloc_lock();
3809 	if (!fl)
3810 		return NULL;
3811 	fl->fl_lmops = &nfsd_lease_mng_ops;
3812 	fl->fl_flags = FL_DELEG;
3813 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
3814 	fl->fl_end = OFFSET_MAX;
3815 	fl->fl_owner = (fl_owner_t)fp;
3816 	fl->fl_pid = current->tgid;
3817 	return fl;
3818 }
3819 
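/*
 * Back a new delegation with a vfs lease (FL_DELEG) on the file.  Once
 * the lease is set we must recheck fi_had_conflict and fi_deleg_file
 * under state_lock and fi_lock, since a lease break or a competing
 * delegation may have raced in while we held neither lock.
 */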
3820 static int nfs4_setlease(struct nfs4_delegation *dp)
3821 {
3822 	struct nfs4_file *fp = dp->dl_stid.sc_file;
3823 	struct file_lock *fl;
3824 	struct file *filp;
3825 	int status = 0;
3826 
3827 	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
3828 	if (!fl)
3829 		return -ENOMEM;
3830 	filp = find_readable_file(fp);
3831 	if (!filp) {
3832 		/* We should always have a readable file here */
3833 		WARN_ON_ONCE(1);
		locks_free_lock(fl);
3834 		return -EBADF;
3835 	}
3836 	fl->fl_file = filp;
3838 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
3839 	if (fl)
3840 		locks_free_lock(fl);
3841 	if (status)
3842 		goto out_fput;
3843 	spin_lock(&state_lock);
3844 	spin_lock(&fp->fi_lock);
3845 	/* Did the lease get broken before we took the lock? */
3846 	status = -EAGAIN;
3847 	if (fp->fi_had_conflict)
3848 		goto out_unlock;
3849 	/* Race breaker */
3850 	if (fp->fi_deleg_file) {
3851 		status = 0;
3852 		++fp->fi_delegees;
3853 		hash_delegation_locked(dp, fp);
3854 		goto out_unlock;
3855 	}
3856 	fp->fi_deleg_file = filp;
3857 	fp->fi_delegees = 1;
3858 	hash_delegation_locked(dp, fp);
3859 	spin_unlock(&fp->fi_lock);
3860 	spin_unlock(&state_lock);
3861 	return 0;
3862 out_unlock:
3863 	spin_unlock(&fp->fi_lock);
3864 	spin_unlock(&state_lock);
3865 out_fput:
3866 	fput(filp);
3867 	return status;
3868 }
3869 
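/*
 * Allocate a delegation for this client and file and hash it.  Only
 * the first delegation on a file goes through nfs4_setlease(); later
 * ones just bump fi_delegees under the locks.
 */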
3870 static struct nfs4_delegation *
3871 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3872 		    struct nfs4_file *fp)
3873 {
3874 	int status;
3875 	struct nfs4_delegation *dp;
3876 
3877 	if (fp->fi_had_conflict)
3878 		return ERR_PTR(-EAGAIN);
3879 
3880 	dp = alloc_init_deleg(clp, fh);
3881 	if (!dp)
3882 		return ERR_PTR(-ENOMEM);
3883 
3884 	get_nfs4_file(fp);
3885 	spin_lock(&state_lock);
3886 	spin_lock(&fp->fi_lock);
3887 	dp->dl_stid.sc_file = fp;
3888 	if (!fp->fi_deleg_file) {
3889 		spin_unlock(&fp->fi_lock);
3890 		spin_unlock(&state_lock);
3891 		status = nfs4_setlease(dp);
3892 		goto out;
3893 	}
3894 	if (fp->fi_had_conflict) {
3895 		status = -EAGAIN;
3896 		goto out_unlock;
3897 	}
3898 	++fp->fi_delegees;
3899 	hash_delegation_locked(dp, fp);
3900 	status = 0;
3901 out_unlock:
3902 	spin_unlock(&fp->fi_lock);
3903 	spin_unlock(&state_lock);
3904 out:
3905 	if (status) {
3906 		nfs4_put_stid(&dp->dl_stid);
3907 		return ERR_PTR(status);
3908 	}
3909 	return dp;
3910 }
3911 
3912 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3913 {
3914 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3915 	if (status == -EAGAIN)
3916 		open->op_why_no_deleg = WND4_CONTENTION;
3917 	else {
3918 		open->op_why_no_deleg = WND4_RESOURCE;
3919 		switch (open->op_deleg_want) {
3920 		case NFS4_SHARE_WANT_READ_DELEG:
3921 		case NFS4_SHARE_WANT_WRITE_DELEG:
3922 		case NFS4_SHARE_WANT_ANY_DELEG:
3923 			break;
3924 		case NFS4_SHARE_WANT_CANCEL:
3925 			open->op_why_no_deleg = WND4_CANCELLED;
3926 			break;
3927 		case NFS4_SHARE_WANT_NO_DELEG:
3928 			WARN_ON_ONCE(1);
3929 		}
3930 	}
3931 }
3932 
3933 /*
3934  * Attempt to hand out a delegation.
3935  *
3936  * Note we don't support write delegations, and won't until the vfs has
3937  * proper support for them.
3938  */
3939 static void
3940 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
3941 			struct nfs4_ol_stateid *stp)
3942 {
3943 	struct nfs4_delegation *dp;
3944 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
3945 	struct nfs4_client *clp = stp->st_stid.sc_client;
3946 	int cb_up;
3947 	int status = 0;
3948 
3949 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3950 	open->op_recall = 0;
3951 	switch (open->op_claim_type) {
3952 		case NFS4_OPEN_CLAIM_PREVIOUS:
3953 			if (!cb_up)
3954 				open->op_recall = 1;
3955 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
3956 				goto out_no_deleg;
3957 			break;
3958 		case NFS4_OPEN_CLAIM_NULL:
3959 		case NFS4_OPEN_CLAIM_FH:
3960 			/*
3961 			 * Let's not give out any delegations till everyone's
3962 			 * had the chance to reclaim theirs....
3963 			 */
3964 			if (locks_in_grace(clp->net))
3965 				goto out_no_deleg;
3966 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3967 				goto out_no_deleg;
3968 			/*
3969 			 * Also, if the file was opened for write or
3970 			 * create, there's a good chance the client's
3971 			 * about to write to it, resulting in an
3972 			 * immediate recall (since we don't support
3973 			 * write delegations):
3974 			 */
3975 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3976 				goto out_no_deleg;
3977 			if (open->op_create == NFS4_OPEN_CREATE)
3978 				goto out_no_deleg;
3979 			break;
3980 		default:
3981 			goto out_no_deleg;
3982 	}
3983 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
3984 	if (IS_ERR(dp))
3985 		goto out_no_deleg;
3986 
3987 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3988 
3989 	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3990 		STATEID_VAL(&dp->dl_stid.sc_stateid));
3991 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3992 	nfs4_put_stid(&dp->dl_stid);
3993 	return;
3994 out_no_deleg:
3995 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3996 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
3997 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3998 		open->op_recall = 1;
3999 	}
4000 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4001 
4002 	/* 4.1 client asking for a delegation? */
4003 	if (open->op_deleg_want)
4004 		nfsd4_open_deleg_none_ext(open, status);
4005 	return;
4006 }
4007 
4008 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4009 					struct nfs4_delegation *dp)
4010 {
4011 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4012 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4013 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4014 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4015 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4016 		   dp->dl_type == NFS4_OPEN_DELEGATE_READ) {
4017 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4018 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4019 	}
4020 	/* Otherwise the client must be confused, wanting a delegation
4021 	 * it already has; in that case we don't return
4022 	 * NFS4_OPEN_DELEGATE_NONE_EXT or a reason.
4023 	 */
4024 }
4025 
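/*
 * The meat of OPEN processing: find or create the nfs4_file, create or
 * upgrade the open stateid, and then try to hand out a delegation.
 * Once the stateid is installed, a delegation failure no longer fails
 * the OPEN.
 */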
4026 __be32
4027 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4028 {
4029 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
4030 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4031 	struct nfs4_file *fp = NULL;
4032 	struct nfs4_ol_stateid *stp = NULL;
4033 	struct nfs4_delegation *dp = NULL;
4034 	__be32 status;
4035 
4036 	/*
4037 	 * Lookup file; if found, lookup stateid and check open request,
4038 	 * and check for delegations in the process of being recalled.
4039 	 * If not found, create the nfs4_file struct
4040 	 */
4041 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4042 	if (fp != open->op_file) {
4043 		status = nfs4_check_deleg(cl, open, &dp);
4044 		if (status)
4045 			goto out;
4046 		stp = nfsd4_find_existing_open(fp, open);
4047 	} else {
4048 		open->op_file = NULL;
4049 		status = nfserr_bad_stateid;
4050 		if (nfsd4_is_deleg_cur(open))
4051 			goto out;
4052 		status = nfserr_jukebox;
4053 	}
4054 
4055 	/*
4056 	 * OPEN the file, or upgrade an existing OPEN.
4057 	 * If truncate fails, the OPEN fails.
4058 	 */
4059 	if (stp) {
4060 		/* Stateid was found, this is an OPEN upgrade */
4061 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4062 		if (status)
4063 			goto out;
4064 	} else {
4065 		stp = open->op_stp;
4066 		open->op_stp = NULL;
4067 		init_open_stateid(stp, fp, open);
4068 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4069 		if (status) {
4070 			release_open_stateid(stp);
4071 			goto out;
4072 		}
4073 	}
4074 	update_stateid(&stp->st_stid.sc_stateid);
4075 	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4076 
4077 	if (nfsd4_has_session(&resp->cstate)) {
4078 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4079 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4080 			open->op_why_no_deleg = WND4_NOT_WANTED;
4081 			goto nodeleg;
4082 		}
4083 	}
4084 
4085 	/*
4086 	 * Attempt to hand out a delegation. No error return, because the
4087 	 * OPEN succeeds even if we fail.
4088 	 */
4089 	nfs4_open_delegation(current_fh, open, stp);
4090 nodeleg:
4091 	status = nfs_ok;
4092 
4093 	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4094 		STATEID_VAL(&stp->st_stid.sc_stateid));
4095 out:
4096 	/* 4.1 client trying to upgrade/downgrade delegation? */
4097 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4098 	    open->op_deleg_want)
4099 		nfsd4_deleg_xgrade_none_ext(open, dp);
4100 
4101 	if (fp)
4102 		put_nfs4_file(fp);
4103 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4104 		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
4105 	/*
4106 	 * To finish the open response, we just need to set the rflags.
4107 	 */
4108 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4109 	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
4110 	    !nfsd4_has_session(&resp->cstate))
4111 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4112 	if (dp)
4113 		nfs4_put_stid(&dp->dl_stid);
4114 	if (stp)
4115 		nfs4_put_stid(&stp->st_stid);
4116 
4117 	return status;
4118 }
4119 
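/*
 * Release whatever OPEN processing left in the open args: the
 * openowner reference (saved first for seqid replay), the preallocated
 * nfs4_file, and any unused preallocated stateid.
 */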
4120 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4121 			      struct nfsd4_open *open, __be32 status)
4122 {
4123 	if (open->op_openowner) {
4124 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4125 
4126 		nfsd4_cstate_assign_replay(cstate, so);
4127 		nfs4_put_stateowner(so);
4128 	}
4129 	if (open->op_file)
4130 		kmem_cache_free(file_slab, open->op_file);
4131 	if (open->op_stp)
4132 		nfs4_put_stid(&open->op_stp->st_stid);
4133 }
4134 
4135 __be32
4136 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4137 	    clientid_t *clid)
4138 {
4139 	struct nfs4_client *clp;
4140 	__be32 status;
4141 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4142 
4143 	dprintk("process_renew(%08x/%08x): starting\n",
4144 			clid->cl_boot, clid->cl_id);
4145 	status = lookup_clientid(clid, cstate, nn);
4146 	if (status)
4147 		goto out;
4148 	clp = cstate->clp;
4149 	status = nfserr_cb_path_down;
4150 	if (!list_empty(&clp->cl_delegations)
4151 			&& clp->cl_cb_state != NFSD4_CB_UP)
4152 		goto out;
4153 	status = nfs_ok;
4154 out:
4155 	return status;
4156 }
4157 
4158 void
4159 nfsd4_end_grace(struct nfsd_net *nn)
4160 {
4161 	/* do nothing if grace period already ended */
4162 	if (nn->grace_ended)
4163 		return;
4164 
4165 	dprintk("NFSD: end of grace period\n");
4166 	nn->grace_ended = true;
4167 	/*
4168 	 * If the server goes down again right now, an NFSv4
4169 	 * client will still be allowed to reclaim after it comes back up,
4170 	 * even if it hasn't yet had a chance to reclaim state this time.
4171 	 *
4172 	 */
4173 	nfsd4_record_grace_done(nn);
4174 	/*
4175 	 * At this point, NFSv4 clients can still reclaim.  But if the
4176 	 * server crashes, any that have not yet reclaimed will be out
4177 	 * of luck on the next boot.
4178 	 *
4179 	 * (NFSv4.1+ clients are considered to have reclaimed once they
4180 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
4181 	 * have reclaimed after their first OPEN.)
4182 	 */
4183 	locks_end_grace(&nn->nfsd4_manager);
4184 	/*
4185 	 * At this point, and once lockd and/or any other containers
4186 	 * exit their grace period, further reclaims will fail and
4187 	 * regular locking can resume.
4188 	 */
4189 }
4190 
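/*
 * The laundromat: expire clients, delegations and v4.0 closed stateids
 * whose time ran out more than one lease period ago.  Returns the
 * number of seconds to wait before the next sweep is due.
 */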
4191 static time_t
4192 nfs4_laundromat(struct nfsd_net *nn)
4193 {
4194 	struct nfs4_client *clp;
4195 	struct nfs4_openowner *oo;
4196 	struct nfs4_delegation *dp;
4197 	struct nfs4_ol_stateid *stp;
4198 	struct list_head *pos, *next, reaplist;
4199 	time_t cutoff = get_seconds() - nn->nfsd4_lease;
4200 	time_t t, new_timeo = nn->nfsd4_lease;
4201 
4202 	dprintk("NFSD: laundromat service - starting\n");
4203 	nfsd4_end_grace(nn);
4204 	INIT_LIST_HEAD(&reaplist);
4205 	spin_lock(&nn->client_lock);
4206 	list_for_each_safe(pos, next, &nn->client_lru) {
4207 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4208 		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4209 			t = clp->cl_time - cutoff;
4210 			new_timeo = min(new_timeo, t);
4211 			break;
4212 		}
4213 		if (mark_client_expired_locked(clp)) {
4214 			dprintk("NFSD: client in use (clientid %08x)\n",
4215 				clp->cl_clientid.cl_id);
4216 			continue;
4217 		}
4218 		list_add(&clp->cl_lru, &reaplist);
4219 	}
4220 	spin_unlock(&nn->client_lock);
4221 	list_for_each_safe(pos, next, &reaplist) {
4222 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4223 		dprintk("NFSD: purging unused client (clientid %08x)\n",
4224 			clp->cl_clientid.cl_id);
4225 		list_del_init(&clp->cl_lru);
4226 		expire_client(clp);
4227 	}
4228 	spin_lock(&state_lock);
4229 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
4230 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4231 		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
4232 			continue;
4233 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4234 			t = dp->dl_time - cutoff;
4235 			new_timeo = min(new_timeo, t);
4236 			break;
4237 		}
4238 		unhash_delegation_locked(dp);
4239 		list_add(&dp->dl_recall_lru, &reaplist);
4240 	}
4241 	spin_unlock(&state_lock);
4242 	while (!list_empty(&reaplist)) {
4243 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
4244 					dl_recall_lru);
4245 		list_del_init(&dp->dl_recall_lru);
4246 		revoke_delegation(dp);
4247 	}
4248 
4249 	spin_lock(&nn->client_lock);
4250 	while (!list_empty(&nn->close_lru)) {
4251 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4252 					oo_close_lru);
4253 		if (time_after((unsigned long)oo->oo_time,
4254 			       (unsigned long)cutoff)) {
4255 			t = oo->oo_time - cutoff;
4256 			new_timeo = min(new_timeo, t);
4257 			break;
4258 		}
4259 		list_del_init(&oo->oo_close_lru);
4260 		stp = oo->oo_last_closed_stid;
4261 		oo->oo_last_closed_stid = NULL;
4262 		spin_unlock(&nn->client_lock);
4263 		nfs4_put_stid(&stp->st_stid);
4264 		spin_lock(&nn->client_lock);
4265 	}
4266 	spin_unlock(&nn->client_lock);
4267 
4268 	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4269 	return new_timeo;
4270 }
4271 
4272 static struct workqueue_struct *laundry_wq;
4273 static void laundromat_main(struct work_struct *);
4274 
4275 static void
4276 laundromat_main(struct work_struct *laundry)
4277 {
4278 	time_t t;
4279 	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
4280 						  work);
4281 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4282 					   laundromat_work);
4283 
4284 	t = nfs4_laundromat(nn);
4285 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4286 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4287 }
4288 
4289 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
4290 {
4291 	if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
4292 		return nfserr_bad_stateid;
4293 	return nfs_ok;
4294 }
4295 
4296 static inline int
4297 access_permit_read(struct nfs4_ol_stateid *stp)
4298 {
4299 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4300 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4301 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4302 }
4303 
4304 static inline int
4305 access_permit_write(struct nfs4_ol_stateid *stp)
4306 {
4307 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4308 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4309 }
4310 
4311 static
4312 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4313 {
4314 	__be32 status = nfserr_openmode;
4315 
4316 	/* For lock stateid's, we test the parent open, not the lock: */
4317 	if (stp->st_openstp)
4318 		stp = stp->st_openstp;
4319 	if ((flags & WR_STATE) && !access_permit_write(stp))
4320 		goto out;
4321 	if ((flags & RD_STATE) && !access_permit_read(stp))
4322 		goto out;
4323 	status = nfs_ok;
4324 out:
4325 	return status;
4326 }
4327 
4328 static inline __be32
4329 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4330 {
4331 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
4332 		return nfs_ok;
4333 	else if (locks_in_grace(net)) {
4334 		/* Answer in remaining cases depends on existence of
4335 		 * conflicting state; so we must wait out the grace period. */
4336 		return nfserr_grace;
4337 	} else if (flags & WR_STATE)
4338 		return nfs4_share_conflict(current_fh,
4339 				NFS4_SHARE_DENY_WRITE);
4340 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4341 		return nfs4_share_conflict(current_fh,
4342 				NFS4_SHARE_DENY_READ);
4343 }
4344 
4345 /*
4346  * Allow READ/WRITE during grace period on recovered state only for files
4347  * that are not able to provide mandatory locking.
4348  */
4349 static inline int
4350 grace_disallows_io(struct net *net, struct inode *inode)
4351 {
4352 	return locks_in_grace(net) && mandatory_lock(inode);
4353 }
4354 
4355 /* Returns true iff a is later than b: */
4356 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
4357 {
4358 	return (s32)(a->si_generation - b->si_generation) > 0;
4359 }
4360 
4361 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4362 {
4363 	/*
4364 	 * When sessions are used the stateid generation number is ignored
4365 	 * when it is zero.
4366 	 */
4367 	if (has_session && in->si_generation == 0)
4368 		return nfs_ok;
4369 
4370 	if (in->si_generation == ref->si_generation)
4371 		return nfs_ok;
4372 
4373 	/* If the client sends us a stateid from the future, it's buggy: */
4374 	if (stateid_generation_after(in, ref))
4375 		return nfserr_bad_stateid;
4376 	/*
4377 	 * However, we could see a stateid from the past, even from a
4378 	 * non-buggy client.  For example, if the client sends a lock
4379 	 * while some IO is outstanding, the lock may bump si_generation
4380 	 * while the IO is still in flight.  The client could avoid that
4381 	 * situation by waiting for responses on all the IO requests,
4382 	 * but better performance may result in retrying IO that
4383 	 * receives an old_stateid error if requests are rarely
4384 	 * reordered in flight:
4385 	 */
4386 	return nfserr_old_stateid;
4387 }
4388 
4389 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4390 {
4391 	struct nfs4_stid *s;
4392 	struct nfs4_ol_stateid *ols;
4393 	__be32 status = nfserr_bad_stateid;
4394 
4395 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4396 		return status;
4397 	/* Client debugging aid. */
4398 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4399 		char addr_str[INET6_ADDRSTRLEN];
4400 		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4401 				 sizeof(addr_str));
4402 		pr_warn_ratelimited("NFSD: client %s testing state ID "
4403 					"with incorrect client ID\n", addr_str);
4404 		return status;
4405 	}
4406 	spin_lock(&cl->cl_lock);
4407 	s = find_stateid_locked(cl, stateid);
4408 	if (!s)
4409 		goto out_unlock;
4410 	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
4411 	if (status)
4412 		goto out_unlock;
4413 	switch (s->sc_type) {
4414 	case NFS4_DELEG_STID:
4415 		status = nfs_ok;
4416 		break;
4417 	case NFS4_REVOKED_DELEG_STID:
4418 		status = nfserr_deleg_revoked;
4419 		break;
4420 	case NFS4_OPEN_STID:
4421 	case NFS4_LOCK_STID:
4422 		ols = openlockstateid(s);
4423 		if (ols->st_stateowner->so_is_open_owner
4424 	    			&& !(openowner(ols->st_stateowner)->oo_flags
4425 						& NFS4_OO_CONFIRMED))
4426 			status = nfserr_bad_stateid;
4427 		else
4428 			status = nfs_ok;
4429 		break;
4430 	default:
4431 		printk("unknown stateid type %x\n", s->sc_type);
4432 		/* Fallthrough */
4433 	case NFS4_CLOSED_STID:
4434 	case NFS4_CLOSED_DELEG_STID:
4435 		status = nfserr_bad_stateid;
4436 	}
4437 out_unlock:
4438 	spin_unlock(&cl->cl_lock);
4439 	return status;
4440 }
4441 
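/*
 * Translate a wire stateid into an nfs4_stid of one of the types in
 * typemask, taking a reference on it.  The special all-zeros and
 * all-ones stateids are rejected here and must be handled by the
 * caller.
 */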
4442 __be32
4443 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4444 		     stateid_t *stateid, unsigned char typemask,
4445 		     struct nfs4_stid **s, struct nfsd_net *nn)
4446 {
4447 	__be32 status;
4448 
4449 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4450 		return nfserr_bad_stateid;
4451 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4452 	if (status == nfserr_stale_clientid) {
4453 		if (cstate->session)
4454 			return nfserr_bad_stateid;
4455 		return nfserr_stale_stateid;
4456 	}
4457 	if (status)
4458 		return status;
4459 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
4460 	if (!*s)
4461 		return nfserr_bad_stateid;
4462 	return nfs_ok;
4463 }
4464 
4465 /*
4466  * Checks for stateid operations
4467  */
4468 __be32
4469 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
4470 			   stateid_t *stateid, int flags, struct file **filpp)
4471 {
4472 	struct nfs4_stid *s;
4473 	struct nfs4_ol_stateid *stp = NULL;
4474 	struct nfs4_delegation *dp = NULL;
4475 	struct svc_fh *current_fh = &cstate->current_fh;
4476 	struct inode *ino = current_fh->fh_dentry->d_inode;
4477 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4478 	struct file *file = NULL;
4479 	__be32 status;
4480 
4481 	if (filpp)
4482 		*filpp = NULL;
4483 
4484 	if (grace_disallows_io(net, ino))
4485 		return nfserr_grace;
4486 
4487 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4488 		return check_special_stateids(net, current_fh, stateid, flags);
4489 
4490 	status = nfsd4_lookup_stateid(cstate, stateid,
4491 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
4492 				&s, nn);
4493 	if (status)
4494 		return status;
4495 	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
4496 	if (status)
4497 		goto out;
4498 	switch (s->sc_type) {
4499 	case NFS4_DELEG_STID:
4500 		dp = delegstateid(s);
4501 		status = nfs4_check_delegmode(dp, flags);
4502 		if (status)
4503 			goto out;
4504 		if (filpp) {
4505 			file = dp->dl_stid.sc_file->fi_deleg_file;
4506 			if (!file) {
4507 				WARN_ON_ONCE(1);
4508 				status = nfserr_serverfault;
4509 				goto out;
4510 			}
4511 			get_file(file);
4512 		}
4513 		break;
4514 	case NFS4_OPEN_STID:
4515 	case NFS4_LOCK_STID:
4516 		stp = openlockstateid(s);
4517 		status = nfs4_check_fh(current_fh, stp);
4518 		if (status)
4519 			goto out;
4520 		if (stp->st_stateowner->so_is_open_owner
4521 		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4522 			goto out;
4523 		status = nfs4_check_openmode(stp, flags);
4524 		if (status)
4525 			goto out;
4526 		if (filpp) {
4527 			struct nfs4_file *fp = stp->st_stid.sc_file;
4528 
4529 			if (flags & RD_STATE)
4530 				file = find_readable_file(fp);
4531 			else
4532 				file = find_writeable_file(fp);
4533 		}
4534 		break;
4535 	default:
4536 		status = nfserr_bad_stateid;
4537 		goto out;
4538 	}
4539 	status = nfs_ok;
4540 	if (file)
4541 		*filpp = file;
4542 out:
4543 	nfs4_put_stid(s);
4544 	return status;
4545 }
4546 
4547 /*
4548  * Test if the stateid is valid
4549  */
4550 __be32
4551 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4552 		   struct nfsd4_test_stateid *test_stateid)
4553 {
4554 	struct nfsd4_test_stateid_id *stateid;
4555 	struct nfs4_client *cl = cstate->session->se_client;
4556 
4557 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4558 		stateid->ts_id_status =
4559 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4560 
4561 	return nfs_ok;
4562 }
4563 
4564 __be32
4565 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4566 		   struct nfsd4_free_stateid *free_stateid)
4567 {
4568 	stateid_t *stateid = &free_stateid->fr_stateid;
4569 	struct nfs4_stid *s;
4570 	struct nfs4_delegation *dp;
4571 	struct nfs4_ol_stateid *stp;
4572 	struct nfs4_client *cl = cstate->session->se_client;
4573 	__be32 ret = nfserr_bad_stateid;
4574 
4575 	spin_lock(&cl->cl_lock);
4576 	s = find_stateid_locked(cl, stateid);
4577 	if (!s)
4578 		goto out_unlock;
4579 	switch (s->sc_type) {
4580 	case NFS4_DELEG_STID:
4581 		ret = nfserr_locks_held;
4582 		break;
4583 	case NFS4_OPEN_STID:
4584 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4585 		if (ret)
4586 			break;
4587 		ret = nfserr_locks_held;
4588 		break;
4589 	case NFS4_LOCK_STID:
4590 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4591 		if (ret)
4592 			break;
4593 		stp = openlockstateid(s);
4594 		ret = nfserr_locks_held;
4595 		if (check_for_locks(stp->st_stid.sc_file,
4596 				    lockowner(stp->st_stateowner)))
4597 			break;
4598 		unhash_lock_stateid(stp);
4599 		spin_unlock(&cl->cl_lock);
4600 		nfs4_put_stid(s);
4601 		ret = nfs_ok;
4602 		goto out;
4603 	case NFS4_REVOKED_DELEG_STID:
4604 		dp = delegstateid(s);
4605 		list_del_init(&dp->dl_recall_lru);
4606 		spin_unlock(&cl->cl_lock);
4607 		nfs4_put_stid(s);
4608 		ret = nfs_ok;
4609 		goto out;
4610 	/* Default falls through and returns nfserr_bad_stateid */
4611 	}
4612 out_unlock:
4613 	spin_unlock(&cl->cl_lock);
4614 out:
4615 	return ret;
4616 }
4617 
4618 static inline int
4619 setlkflg (int type)
4620 {
4621 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
4622 		RD_STATE : WR_STATE;
4623 }
4624 
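/*
 * Checks shared by all seqid-mutating operations on an open or lock
 * stateid: verify the owner's seqid, reject stateids kept around only
 * for replay or free_stateid, check the stateid generation, and make
 * sure the stateid matches the current filehandle.
 */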
4625 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
4626 {
4627 	struct svc_fh *current_fh = &cstate->current_fh;
4628 	struct nfs4_stateowner *sop = stp->st_stateowner;
4629 	__be32 status;
4630 
4631 	status = nfsd4_check_seqid(cstate, sop, seqid);
4632 	if (status)
4633 		return status;
4634 	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4635 		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4636 		/*
4637 		 * "Closed" stateid's exist *only* to return
4638 		 * nfserr_replay_me from the previous step, and
4639 		 * revoked delegations are kept only for free_stateid.
4640 		 */
4641 		return nfserr_bad_stateid;
4642 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4643 	if (status)
4644 		return status;
4645 	return nfs4_check_fh(current_fh, stp);
4646 }
4647 
4648 /*
4649  * Checks for sequence id mutating operations.
4650  */
4651 static __be32
4652 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4653 			 stateid_t *stateid, char typemask,
4654 			 struct nfs4_ol_stateid **stpp,
4655 			 struct nfsd_net *nn)
4656 {
4657 	__be32 status;
4658 	struct nfs4_stid *s;
4659 	struct nfs4_ol_stateid *stp = NULL;
4660 
4661 	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
4662 		seqid, STATEID_VAL(stateid));
4663 
4664 	*stpp = NULL;
4665 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
4666 	if (status)
4667 		return status;
4668 	stp = openlockstateid(s);
4669 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
4670 
4671 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
4672 	if (!status)
4673 		*stpp = stp;
4674 	else
4675 		nfs4_put_stid(&stp->st_stid);
4676 	return status;
4677 }
4678 
4679 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4680 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
4681 {
4682 	__be32 status;
4683 	struct nfs4_openowner *oo;
4684 	struct nfs4_ol_stateid *stp;
4685 
4686 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
4687 						NFS4_OPEN_STID, &stp, nn);
4688 	if (status)
4689 		return status;
4690 	oo = openowner(stp->st_stateowner);
4691 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4692 		nfs4_put_stid(&stp->st_stid);
4693 		return nfserr_bad_stateid;
4694 	}
4695 	*stpp = stp;
4696 	return nfs_ok;
4697 }
4698 
4699 __be32
4700 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4701 		   struct nfsd4_open_confirm *oc)
4702 {
4703 	__be32 status;
4704 	struct nfs4_openowner *oo;
4705 	struct nfs4_ol_stateid *stp;
4706 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4707 
4708 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
4709 			cstate->current_fh.fh_dentry);
4710 
4711 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
4712 	if (status)
4713 		return status;
4714 
4715 	status = nfs4_preprocess_seqid_op(cstate,
4716 					oc->oc_seqid, &oc->oc_req_stateid,
4717 					NFS4_OPEN_STID, &stp, nn);
4718 	if (status)
4719 		goto out;
4720 	oo = openowner(stp->st_stateowner);
4721 	status = nfserr_bad_stateid;
4722 	if (oo->oo_flags & NFS4_OO_CONFIRMED)
4723 		goto put_stateid;
4724 	oo->oo_flags |= NFS4_OO_CONFIRMED;
4725 	update_stateid(&stp->st_stid.sc_stateid);
4726 	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4727 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4728 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4729 
4730 	nfsd4_client_record_create(oo->oo_owner.so_client);
4731 	status = nfs_ok;
4732 put_stateid:
4733 	nfs4_put_stid(&stp->st_stid);
4734 out:
4735 	nfsd4_bump_seqid(cstate, status);
4736 	return status;
4737 }
4738 
4739 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
4740 {
4741 	if (!test_access(access, stp))
4742 		return;
4743 	nfs4_file_put_access(stp->st_stid.sc_file, access);
4744 	clear_access(access, stp);
4745 }
4746 
4747 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
4748 {
4749 	switch (to_access) {
4750 	case NFS4_SHARE_ACCESS_READ:
4751 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
4752 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4753 		break;
4754 	case NFS4_SHARE_ACCESS_WRITE:
4755 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
4756 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4757 		break;
4758 	case NFS4_SHARE_ACCESS_BOTH:
4759 		break;
4760 	default:
4761 		WARN_ON_ONCE(1);
4762 	}
4763 }
4764 
4765 __be32
4766 nfsd4_open_downgrade(struct svc_rqst *rqstp,
4767 		     struct nfsd4_compound_state *cstate,
4768 		     struct nfsd4_open_downgrade *od)
4769 {
4770 	__be32 status;
4771 	struct nfs4_ol_stateid *stp;
4772 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4773 
4774 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
4775 			cstate->current_fh.fh_dentry);
4776 
4777 	/* We don't yet support WANT bits: */
4778 	if (od->od_deleg_want)
4779 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
4780 			od->od_deleg_want);
4781 
4782 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
4783 					&od->od_stateid, &stp, nn);
4784 	if (status)
4785 		goto out;
4786 	status = nfserr_inval;
4787 	if (!test_access(od->od_share_access, stp)) {
4788 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
4789 			stp->st_access_bmap, od->od_share_access);
4790 		goto put_stateid;
4791 	}
4792 	if (!test_deny(od->od_share_deny, stp)) {
4793 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
4794 			stp->st_deny_bmap, od->od_share_deny);
4795 		goto put_stateid;
4796 	}
4797 	nfs4_stateid_downgrade(stp, od->od_share_access);
4798 
4799 	reset_union_bmap_deny(od->od_share_deny, stp);
4800 
4801 	update_stateid(&stp->st_stid.sc_stateid);
4802 	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4803 	status = nfs_ok;
4804 put_stateid:
4805 	nfs4_put_stid(&stp->st_stid);
4806 out:
4807 	nfsd4_bump_seqid(cstate, status);
4808 	return status;
4809 }
4810 
4811 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
4812 {
4813 	struct nfs4_client *clp = s->st_stid.sc_client;
4814 	LIST_HEAD(reaplist);
4815 
4816 	s->st_stid.sc_type = NFS4_CLOSED_STID;
4817 	spin_lock(&clp->cl_lock);
4818 	unhash_open_stateid(s, &reaplist);
4819 
4820 	if (clp->cl_minorversion) {
4821 		put_ol_stateid_locked(s, &reaplist);
4822 		spin_unlock(&clp->cl_lock);
4823 		free_ol_stateid_reaplist(&reaplist);
4824 	} else {
4825 		spin_unlock(&clp->cl_lock);
4826 		free_ol_stateid_reaplist(&reaplist);
4827 		move_to_close_lru(s, clp->net);
4828 	}
4829 }
4830 
4831 /*
4832  * nfs4_unlock_state() called after encode
4833  */
4834 __be32
4835 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4836 	    struct nfsd4_close *close)
4837 {
4838 	__be32 status;
4839 	struct nfs4_ol_stateid *stp;
4840 	struct net *net = SVC_NET(rqstp);
4841 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4842 
4843 	dprintk("NFSD: nfsd4_close on file %pd\n",
4844 			cstate->current_fh.fh_dentry);
4845 
4846 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
4847 					&close->cl_stateid,
4848 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
4849 					&stp, nn);
4850 	nfsd4_bump_seqid(cstate, status);
4851 	if (status)
4852 		goto out;
4853 	update_stateid(&stp->st_stid.sc_stateid);
4854 	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4855 
4856 	nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
4857 				      stp->st_stid.sc_file);
4858 
4859 	nfsd4_close_open_stateid(stp);
4860 
4861 	/* put reference from nfs4_preprocess_seqid_op */
4862 	nfs4_put_stid(&stp->st_stid);
4863 out:
4864 	return status;
4865 }
4866 
4867 __be32
4868 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4869 		  struct nfsd4_delegreturn *dr)
4870 {
4871 	struct nfs4_delegation *dp;
4872 	stateid_t *stateid = &dr->dr_stateid;
4873 	struct nfs4_stid *s;
4874 	__be32 status;
4875 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4876 
4877 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4878 		return status;
4879 
4880 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4881 	if (status)
4882 		goto out;
4883 	dp = delegstateid(s);
4884 	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4885 	if (status)
4886 		goto put_stateid;
4887 
4888 	destroy_delegation(dp);
4889 put_stateid:
4890 	nfs4_put_stid(&dp->dl_stid);
4891 out:
4892 	return status;
4893 }
4894 
4895 
4896 #define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
4897 
4898 static inline u64
4899 end_offset(u64 start, u64 len)
4900 {
4901 	u64 end;
4902 
4903 	end = start + len;
4904 	return end >= start ? end: NFS4_MAX_UINT64;
4905 }
4906 
4907 /* last octet in a range */
4908 static inline u64
4909 last_byte_offset(u64 start, u64 len)
4910 {
4911 	u64 end;
4912 
4913 	WARN_ON_ONCE(!len);
4914 	end = start + len;
4915 	return end > start ? end - 1: NFS4_MAX_UINT64;
4916 }
4917 
4918 /*
4919  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4920  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4921  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
4922  * locking, this prevents us from being completely protocol-compliant.  The
4923  * real solution to this problem is to start using unsigned file offsets in
4924  * the VFS, but this is a very deep change!
4925  */
4926 static inline void
4927 nfs4_transform_lock_offset(struct file_lock *lock)
4928 {
4929 	if (lock->fl_start < 0)
4930 		lock->fl_start = OFFSET_MAX;
4931 	if (lock->fl_end < 0)
4932 		lock->fl_end = OFFSET_MAX;
4933 }
4934 
4935 static fl_owner_t
4936 nfsd4_fl_get_owner(fl_owner_t owner)
4937 {
4938 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
4939 
4940 	nfs4_get_stateowner(&lo->lo_owner);
4941 	return owner;
4942 }
4943 
4944 static void
4945 nfsd4_fl_put_owner(fl_owner_t owner)
4946 {
4947 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
4948 
4949 	if (lo)
4950 		nfs4_put_stateowner(&lo->lo_owner);
4951 }
4952 
4953 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
4954 	.lm_get_owner = nfsd4_fl_get_owner,
4955 	.lm_put_owner = nfsd4_fl_put_owner,
4956 };
4957 
4958 static inline void
4959 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
4960 {
4961 	struct nfs4_lockowner *lo;
4962 
4963 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4964 		lo = (struct nfs4_lockowner *) fl->fl_owner;
4965 		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
4966 					lo->lo_owner.so_owner.len, GFP_KERNEL);
4967 		if (!deny->ld_owner.data)
4968 			/* We just don't care that much */
4969 			goto nevermind;
4970 		deny->ld_owner.len = lo->lo_owner.so_owner.len;
4971 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4972 	} else {
4973 nevermind:
4974 		deny->ld_owner.len = 0;
4975 		deny->ld_owner.data = NULL;
4976 		deny->ld_clientid.cl_boot = 0;
4977 		deny->ld_clientid.cl_id = 0;
4978 	}
4979 	deny->ld_start = fl->fl_start;
4980 	deny->ld_length = NFS4_MAX_UINT64;
4981 	if (fl->fl_end != NFS4_MAX_UINT64)
4982 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
4983 	deny->ld_type = NFS4_READ_LT;
4984 	if (fl->fl_type != F_RDLCK)
4985 		deny->ld_type = NFS4_WRITE_LT;
4986 }
4987 
4988 static struct nfs4_lockowner *
4989 find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
4990 		struct nfs4_client *clp)
4991 {
4992 	unsigned int strhashval = ownerstr_hashval(owner);
4993 	struct nfs4_stateowner *so;
4994 
4995 	lockdep_assert_held(&clp->cl_lock);
4996 
4997 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
4998 			    so_strhash) {
4999 		if (so->so_is_open_owner)
5000 			continue;
5001 		if (same_owner_str(so, owner))
5002 			return lockowner(nfs4_get_stateowner(so));
5003 	}
5004 	return NULL;
5005 }
5006 
5007 static struct nfs4_lockowner *
5008 find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
5009 		struct nfs4_client *clp)
5010 {
5011 	struct nfs4_lockowner *lo;
5012 
5013 	spin_lock(&clp->cl_lock);
5014 	lo = find_lockowner_str_locked(clid, owner, clp);
5015 	spin_unlock(&clp->cl_lock);
5016 	return lo;
5017 }
5018 
5019 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5020 {
5021 	unhash_lockowner_locked(lockowner(sop));
5022 }
5023 
5024 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5025 {
5026 	struct nfs4_lockowner *lo = lockowner(sop);
5027 
5028 	kmem_cache_free(lockowner_slab, lo);
5029 }
5030 
5031 static const struct nfs4_stateowner_operations lockowner_ops = {
5032 	.so_unhash =	nfs4_unhash_lockowner,
5033 	.so_free =	nfs4_free_lockowner,
5034 };
5035 
5036 /*
5037  * Alloc a lock owner structure.
5038  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5039  * occurred.
5040  *
5041  * strhashval = ownerstr_hashval
5042  */
5043 static struct nfs4_lockowner *
5044 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5045 			   struct nfs4_ol_stateid *open_stp,
5046 			   struct nfsd4_lock *lock)
5047 {
5048 	struct nfs4_lockowner *lo, *ret;
5049 
5050 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5051 	if (!lo)
5052 		return NULL;
5053 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5054 	lo->lo_owner.so_is_open_owner = 0;
5055 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5056 	lo->lo_owner.so_ops = &lockowner_ops;
5057 	spin_lock(&clp->cl_lock);
5058 	ret = find_lockowner_str_locked(&clp->cl_clientid,
5059 			&lock->lk_new_owner, clp);
5060 	if (ret == NULL) {
5061 		list_add(&lo->lo_owner.so_strhash,
5062 			 &clp->cl_ownerstr_hashtbl[strhashval]);
5063 		ret = lo;
5064 	} else
5065 		nfs4_free_lockowner(&lo->lo_owner);
5066 	spin_unlock(&clp->cl_lock);
5067 	return ret;
5068 }
5069 
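/*
 * Set up a new lock stateid under cl_lock.  It inherits the deny mode
 * of its parent open stateid and gets linked into the lockowner, the
 * open stateid and the file.
 */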
5070 static void
5071 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5072 		  struct nfs4_file *fp, struct inode *inode,
5073 		  struct nfs4_ol_stateid *open_stp)
5074 {
5075 	struct nfs4_client *clp = lo->lo_owner.so_client;
5076 
5077 	lockdep_assert_held(&clp->cl_lock);
5078 
5079 	atomic_inc(&stp->st_stid.sc_count);
5080 	stp->st_stid.sc_type = NFS4_LOCK_STID;
5081 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5082 	get_nfs4_file(fp);
5083 	stp->st_stid.sc_file = fp;
5084 	stp->st_stid.sc_free = nfs4_free_lock_stateid;
5085 	stp->st_access_bmap = 0;
5086 	stp->st_deny_bmap = open_stp->st_deny_bmap;
5087 	stp->st_openstp = open_stp;
5088 	list_add(&stp->st_locks, &open_stp->st_locks);
5089 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5090 	spin_lock(&fp->fi_lock);
5091 	list_add(&stp->st_perfile, &fp->fi_stateids);
5092 	spin_unlock(&fp->fi_lock);
5093 }
5094 
5095 static struct nfs4_ol_stateid *
5096 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5097 {
5098 	struct nfs4_ol_stateid *lst;
5099 	struct nfs4_client *clp = lo->lo_owner.so_client;
5100 
5101 	lockdep_assert_held(&clp->cl_lock);
5102 
5103 	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5104 		if (lst->st_stid.sc_file == fp) {
5105 			atomic_inc(&lst->st_stid.sc_count);
5106 			return lst;
5107 		}
5108 	}
5109 	return NULL;
5110 }
5111 
5112 static struct nfs4_ol_stateid *
5113 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5114 			    struct inode *inode, struct nfs4_ol_stateid *ost,
5115 			    bool *new)
5116 {
5117 	struct nfs4_stid *ns = NULL;
5118 	struct nfs4_ol_stateid *lst;
5119 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5120 	struct nfs4_client *clp = oo->oo_owner.so_client;
5121 
5122 	spin_lock(&clp->cl_lock);
5123 	lst = find_lock_stateid(lo, fi);
5124 	if (lst == NULL) {
5125 		spin_unlock(&clp->cl_lock);
5126 		ns = nfs4_alloc_stid(clp, stateid_slab);
5127 		if (ns == NULL)
5128 			return NULL;
5129 
5130 		spin_lock(&clp->cl_lock);
5131 		lst = find_lock_stateid(lo, fi);
5132 		if (likely(!lst)) {
5133 			lst = openlockstateid(ns);
5134 			init_lock_stateid(lst, lo, fi, inode, ost);
5135 			ns = NULL;
5136 			*new = true;
5137 		}
5138 	}
5139 	spin_unlock(&clp->cl_lock);
5140 	if (ns)
5141 		nfs4_put_stid(ns);
5142 	return lst;
5143 }
5144 
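/*
 * A lock length is invalid if it is zero, or if it overflows the range
 * without being the special "to end of file" value NFS4_MAX_UINT64.
 */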
5145 static int
5146 check_lock_length(u64 offset, u64 length)
5147 {
5148 	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
5149 	     LOFF_OVERFLOW(offset, length)));
5150 }
5151 
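/*
 * Make sure the lock stateid carries the file access this lock type
 * needs, taking another reference on the nfs4_file's access count the
 * first time each access bit is set.
 */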
5152 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5153 {
5154 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5155 
5156 	lockdep_assert_held(&fp->fi_lock);
5157 
5158 	if (test_access(access, lock_stp))
5159 		return;
5160 	__nfs4_file_get_access(fp, access);
5161 	set_access(access, lock_stp);
5162 }
5163 
5164 static __be32
5165 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5166 			    struct nfs4_ol_stateid *ost,
5167 			    struct nfsd4_lock *lock,
5168 			    struct nfs4_ol_stateid **lst, bool *new)
5169 {
5170 	__be32 status;
5171 	struct nfs4_file *fi = ost->st_stid.sc_file;
5172 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5173 	struct nfs4_client *cl = oo->oo_owner.so_client;
5174 	struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
5175 	struct nfs4_lockowner *lo;
5176 	unsigned int strhashval;
5177 
5178 	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
5179 	if (!lo) {
5180 		strhashval = ownerstr_hashval(&lock->v.new.owner);
5181 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5182 		if (lo == NULL)
5183 			return nfserr_jukebox;
5184 	} else {
5185 		/* with an existing lockowner, seqids must be the same */
5186 		status = nfserr_bad_seqid;
5187 		if (!cstate->minorversion &&
5188 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5189 			goto out;
5190 	}
5191 
5192 	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5193 	if (*lst == NULL) {
5194 		status = nfserr_jukebox;
5195 		goto out;
5196 	}
5197 	status = nfs_ok;
5198 out:
5199 	nfs4_put_stateowner(&lo->lo_owner);
5200 	return status;
5201 }
5202 
5203 /*
5204  *  LOCK operation
5205  */
5206 __be32
5207 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5208 	   struct nfsd4_lock *lock)
5209 {
5210 	struct nfs4_openowner *open_sop = NULL;
5211 	struct nfs4_lockowner *lock_sop = NULL;
5212 	struct nfs4_ol_stateid *lock_stp = NULL;
5213 	struct nfs4_ol_stateid *open_stp = NULL;
5214 	struct nfs4_file *fp;
5215 	struct file *filp = NULL;
5216 	struct file_lock *file_lock = NULL;
5217 	struct file_lock *conflock = NULL;
5218 	__be32 status = 0;
5219 	int lkflg;
5220 	int err;
5221 	bool new = false;
5222 	struct net *net = SVC_NET(rqstp);
5223 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5224 
5225 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5226 		(long long) lock->lk_offset,
5227 		(long long) lock->lk_length);
5228 
5229 	if (check_lock_length(lock->lk_offset, lock->lk_length))
5230 		 return nfserr_inval;
5231 
5232 	if ((status = fh_verify(rqstp, &cstate->current_fh,
5233 				S_IFREG, NFSD_MAY_LOCK))) {
5234 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
5235 		return status;
5236 	}
5237 
5238 	if (lock->lk_is_new) {
5239 		if (nfsd4_has_session(cstate))
5240 			/* See rfc 5661 18.10.3: given clientid is ignored: */
5241 			memcpy(&lock->v.new.clientid,
5242 				&cstate->session->se_client->cl_clientid,
5243 				sizeof(clientid_t));
5244 
5245 		status = nfserr_stale_clientid;
5246 		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5247 			goto out;
5248 
5249 		/* validate and update open stateid and open seqid */
5250 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
5251 				        lock->lk_new_open_seqid,
5252 		                        &lock->lk_new_open_stateid,
5253 					&open_stp, nn);
5254 		if (status)
5255 			goto out;
5256 		open_sop = openowner(open_stp->st_stateowner);
5257 		status = nfserr_bad_stateid;
5258 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5259 						&lock->v.new.clientid))
5260 			goto out;
5261 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
5262 							&lock_stp, &new);
5263 	} else {
5264 		status = nfs4_preprocess_seqid_op(cstate,
5265 				       lock->lk_old_lock_seqid,
5266 				       &lock->lk_old_lock_stateid,
5267 				       NFS4_LOCK_STID, &lock_stp, nn);
5268 	}
5269 	if (status)
5270 		goto out;
5271 	lock_sop = lockowner(lock_stp->st_stateowner);
5272 
5273 	lkflg = setlkflg(lock->lk_type);
5274 	status = nfs4_check_openmode(lock_stp, lkflg);
5275 	if (status)
5276 		goto out;
5277 
5278 	status = nfserr_grace;
5279 	if (locks_in_grace(net) && !lock->lk_reclaim)
5280 		goto out;
5281 	status = nfserr_no_grace;
5282 	if (!locks_in_grace(net) && lock->lk_reclaim)
5283 		goto out;
5284 
5285 	file_lock = locks_alloc_lock();
5286 	if (!file_lock) {
5287 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5288 		status = nfserr_jukebox;
5289 		goto out;
5290 	}
5291 
5292 	fp = lock_stp->st_stid.sc_file;
5293 	switch (lock->lk_type) {
5294 		case NFS4_READ_LT:
5295 		case NFS4_READW_LT:
5296 			spin_lock(&fp->fi_lock);
5297 			filp = find_readable_file_locked(fp);
5298 			if (filp)
5299 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5300 			spin_unlock(&fp->fi_lock);
5301 			file_lock->fl_type = F_RDLCK;
5302 			break;
5303 		case NFS4_WRITE_LT:
5304 		case NFS4_WRITEW_LT:
5305 			spin_lock(&fp->fi_lock);
5306 			filp = find_writeable_file_locked(fp);
5307 			if (filp)
5308 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5309 			spin_unlock(&fp->fi_lock);
5310 			file_lock->fl_type = F_WRLCK;
5311 			break;
5312 		default:
5313 			status = nfserr_inval;
5314 		goto out;
5315 	}
5316 	if (!filp) {
5317 		status = nfserr_openmode;
5318 		goto out;
5319 	}
5320 
5321 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
5322 	file_lock->fl_pid = current->tgid;
5323 	file_lock->fl_file = filp;
5324 	file_lock->fl_flags = FL_POSIX;
5325 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
5326 	file_lock->fl_start = lock->lk_offset;
5327 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
5328 	nfs4_transform_lock_offset(file_lock);
5329 
5330 	conflock = locks_alloc_lock();
5331 	if (!conflock) {
5332 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5333 		status = nfserr_jukebox;
5334 		goto out;
5335 	}
5336 
5337 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
5338 	switch (-err) {
5339 	case 0: /* success! */
5340 		update_stateid(&lock_stp->st_stid.sc_stateid);
5341 		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
5342 				sizeof(stateid_t));
5343 		status = 0;
5344 		break;
5345 	case (EAGAIN):		/* conflock holds conflicting lock */
5346 		status = nfserr_denied;
5347 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5348 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
5349 		break;
5350 	case (EDEADLK):
5351 		status = nfserr_deadlock;
5352 		break;
5353 	default:
5354 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
5355 		status = nfserrno(err);
5356 		break;
5357 	}
5358 out:
5359 	if (filp)
5360 		fput(filp);
5361 	if (lock_stp) {
5362 		/* Bump seqid manually if the 4.0 replay owner is openowner */
5363 		if (cstate->replay_owner &&
5364 		    cstate->replay_owner != &lock_sop->lo_owner &&
5365 		    seqid_mutating_err(ntohl(status)))
5366 			lock_sop->lo_owner.so_seqid++;
5367 
5368 		/*
5369 		 * If this is a new, never-before-used stateid, and we are
5370 		 * returning an error, then just go ahead and release it.
5371 		 */
5372 		if (status && new)
5373 			release_lock_stateid(lock_stp);
5374 
5375 		nfs4_put_stid(&lock_stp->st_stid);
5376 	}
5377 	if (open_stp)
5378 		nfs4_put_stid(&open_stp->st_stid);
5379 	nfsd4_bump_seqid(cstate, status);
5380 	if (file_lock)
5381 		locks_free_lock(file_lock);
5382 	if (conflock)
5383 		locks_free_lock(conflock);
5384 	return status;
5385 }
5386 
5387 /*
5388  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
5389  * so we do a temporary open here just to get an open file to pass to
5390  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
5391  * inode operation.)
5392  */
5393 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
5394 {
5395 	struct file *file;
5396 	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
5397 	if (!err) {
5398 		err = nfserrno(vfs_test_lock(file, lock));
5399 		nfsd_close(file);
5400 	}
5401 	return err;
5402 }
5403 
5404 /*
5405  * LOCKT operation
5406  */
5407 __be32
5408 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5409 	    struct nfsd4_lockt *lockt)
5410 {
5411 	struct file_lock *file_lock = NULL;
5412 	struct nfs4_lockowner *lo = NULL;
5413 	__be32 status;
5414 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5415 
5416 	if (locks_in_grace(SVC_NET(rqstp)))
5417 		return nfserr_grace;
5418 
5419 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
5420 		 return nfserr_inval;
5421 
5422 	if (!nfsd4_has_session(cstate)) {
5423 		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
5424 		if (status)
5425 			goto out;
5426 	}
5427 
5428 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5429 		goto out;
5430 
5431 	file_lock = locks_alloc_lock();
5432 	if (!file_lock) {
5433 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5434 		status = nfserr_jukebox;
5435 		goto out;
5436 	}
5437 
5438 	switch (lockt->lt_type) {
5439 		case NFS4_READ_LT:
5440 		case NFS4_READW_LT:
5441 			file_lock->fl_type = F_RDLCK;
5442 		break;
5443 		case NFS4_WRITE_LT:
5444 		case NFS4_WRITEW_LT:
5445 			file_lock->fl_type = F_WRLCK;
5446 		break;
5447 		default:
5448 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
5449 			status = nfserr_inval;
5450 		goto out;
5451 	}
5452 
5453 	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
5454 				cstate->clp);
5455 	if (lo)
5456 		file_lock->fl_owner = (fl_owner_t)lo;
5457 	file_lock->fl_pid = current->tgid;
5458 	file_lock->fl_flags = FL_POSIX;
5459 
5460 	file_lock->fl_start = lockt->lt_offset;
5461 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
5462 
5463 	nfs4_transform_lock_offset(file_lock);
5464 
5465 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
5466 	if (status)
5467 		goto out;
5468 
5469 	if (file_lock->fl_type != F_UNLCK) {
5470 		status = nfserr_denied;
5471 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
5472 	}
5473 out:
5474 	if (lo)
5475 		nfs4_put_stateowner(&lo->lo_owner);
5476 	if (file_lock)
5477 		locks_free_lock(file_lock);
5478 	return status;
5479 }
5480 
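/*
 * Editor's sketch of the LOCKT range math, assuming the helpers defined
 * earlier in this file: an NFSv4 (offset, length) pair maps onto a
 * POSIX byte range [fl_start, fl_end], e.g.
 *
 *	lt_offset = 100, lt_length = 10
 *	fl_start  = 100
 *	fl_end    = last_byte_offset(100, 10) == 109
 *
 * and an end that overflows the signed loff_t range is clamped by
 * nfs4_transform_lock_offset() to OFFSET_MAX.  A conflict is reported
 * as nfserr_denied, with the holder encoded via nfs4_set_lock_denied().
 */
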
5481 __be32
5482 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5483 	    struct nfsd4_locku *locku)
5484 {
5485 	struct nfs4_ol_stateid *stp;
5486 	struct file *filp = NULL;
5487 	struct file_lock *file_lock = NULL;
5488 	__be32 status;
5489 	int err;
5490 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5491 
5492 	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
5493 		(long long) locku->lu_offset,
5494 		(long long) locku->lu_length);
5495 
5496 	if (check_lock_length(locku->lu_offset, locku->lu_length))
5497 		return nfserr_inval;
5498 
5499 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
5500 					&locku->lu_stateid, NFS4_LOCK_STID,
5501 					&stp, nn);
5502 	if (status)
5503 		goto out;
5504 	filp = find_any_file(stp->st_stid.sc_file);
5505 	if (!filp) {
5506 		status = nfserr_lock_range;
5507 		goto put_stateid;
5508 	}
5509 	file_lock = locks_alloc_lock();
5510 	if (!file_lock) {
5511 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5512 		status = nfserr_jukebox;
5513 		goto fput;
5514 	}
5515 
5516 	file_lock->fl_type = F_UNLCK;
5517 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
5518 	file_lock->fl_pid = current->tgid;
5519 	file_lock->fl_file = filp;
5520 	file_lock->fl_flags = FL_POSIX;
5521 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
5522 	file_lock->fl_start = locku->lu_offset;
5523 
5524 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
5525 						locku->lu_length);
5526 	nfs4_transform_lock_offset(file_lock);
5527 
5528 	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
5529 	if (err) {
5530 		dprintk("NFSD: nfsd4_locku: vfs_lock_file failed!\n");
5531 		goto out_nfserr;
5532 	}
5533 	update_stateid(&stp->st_stid.sc_stateid);
5534 	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
5535 fput:
5536 	fput(filp);
5537 put_stateid:
5538 	nfs4_put_stid(&stp->st_stid);
5539 out:
5540 	nfsd4_bump_seqid(cstate, status);
5541 	if (file_lock)
5542 		locks_free_lock(file_lock);
5543 	return status;
5544 
5545 out_nfserr:
5546 	status = nfserrno(err);
5547 	goto fput;
5548 }
5549 
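/*
 * Editor's note: LOCKU is a plain F_SETLK with fl_type == F_UNLCK, so
 * it can never block; any error from vfs_lock_file() is unexpected and
 * is simply mapped through nfserrno().  The stateid is bumped only once
 * the unlock has actually succeeded.
 */
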
5550 /*
5551  * returns
5552  * 	true:  locks held by lockowner
5553  * 	false: no locks held by lockowner
5554  */
5555 static bool
5556 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5557 {
5558 	struct file_lock *fl;
5559 	int status = false;
5560 	struct file *filp = find_any_file(fp);
5561 	struct inode *inode;
5562 	struct file_lock_context *flctx;
5563 
5564 	if (!filp) {
5565 		/* Any valid lock stateid should have some sort of access */
5566 		WARN_ON_ONCE(1);
5567 		return status;
5568 	}
5569 
5570 	inode = file_inode(filp);
5571 	flctx = inode->i_flctx;
5572 
5573 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
5574 		spin_lock(&flctx->flc_lock);
5575 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
5576 			if (fl->fl_owner == (fl_owner_t)lowner) {
5577 				status = true;
5578 				break;
5579 			}
5580 		}
5581 		spin_unlock(&flctx->flc_lock);
5582 	}
5583 	fput(filp);
5584 	return status;
5585 }
5586 
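/*
 * Editor's note: the list_empty_careful() test above is only an
 * unlocked optimization that skips flc_lock for files that never had a
 * POSIX lock; the authoritative walk of flc_posix happens under
 * flc_lock.  Ownership is matched on fl_owner, which nfsd sets to the
 * lockowner pointer itself.
 */
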
5587 __be32
5588 nfsd4_release_lockowner(struct svc_rqst *rqstp,
5589 			struct nfsd4_compound_state *cstate,
5590 			struct nfsd4_release_lockowner *rlockowner)
5591 {
5592 	clientid_t *clid = &rlockowner->rl_clientid;
5593 	struct nfs4_stateowner *sop;
5594 	struct nfs4_lockowner *lo = NULL;
5595 	struct nfs4_ol_stateid *stp;
5596 	struct xdr_netobj *owner = &rlockowner->rl_owner;
5597 	unsigned int hashval = ownerstr_hashval(owner);
5598 	__be32 status;
5599 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5600 	struct nfs4_client *clp;
5601 
5602 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
5603 		clid->cl_boot, clid->cl_id);
5604 
5605 	status = lookup_clientid(clid, cstate, nn);
5606 	if (status)
5607 		return status;
5608 
5609 	clp = cstate->clp;
5610 	/* Find the matching lock stateowner */
5611 	spin_lock(&clp->cl_lock);
5612 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
5613 			    so_strhash) {
5614 
5615 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
5616 			continue;
5617 
5618 		/* see if there are still any locks associated with it */
5619 		lo = lockowner(sop);
5620 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
5621 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
5622 				status = nfserr_locks_held;
5623 				spin_unlock(&clp->cl_lock);
5624 				return status;
5625 			}
5626 		}
5627 
5628 		nfs4_get_stateowner(sop);
5629 		break;
5630 	}
5631 	spin_unlock(&clp->cl_lock);
5632 	if (lo)
5633 		release_lockowner(lo);
5634 	return status;
5635 }
5636 
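/*
 * Editor's note: the hash walk above runs entirely under cl_lock, so a
 * matching lockowner cannot disappear between the check_for_locks()
 * scan and the nfs4_get_stateowner() reference that release_lockowner()
 * consumes afterwards.  Falling out of the loop without a match leaves
 * status at nfs_ok, the correct RELEASE_LOCKOWNER reply for an unknown
 * owner.
 */
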
5637 static inline struct nfs4_client_reclaim *
5638 alloc_reclaim(void)
5639 {
5640 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
5641 }
5642 
5643 bool
5644 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5645 {
5646 	struct nfs4_client_reclaim *crp;
5647 
5648 	crp = nfsd4_find_reclaim_client(name, nn);
5649 	return (crp && crp->cr_clp);
5650 }
5651 
5652 /*
5653  * Failure here means all bets on state reset are off: nfserr_no_grace...
5654  */
5655 struct nfs4_client_reclaim *
5656 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
5657 {
5658 	unsigned int strhashval;
5659 	struct nfs4_client_reclaim *crp;
5660 
5661 	dprintk("NFSD: nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
5662 	crp = alloc_reclaim();
5663 	if (crp) {
5664 		strhashval = clientstr_hashval(name);
5665 		INIT_LIST_HEAD(&crp->cr_strhash);
5666 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
5667 		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
5668 		crp->cr_clp = NULL;
5669 		nn->reclaim_str_hashtbl_size++;
5670 	}
5671 	return crp;
5672 }
5673 
5674 void
5675 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
5676 {
5677 	list_del(&crp->cr_strhash);
5678 	kfree(crp);
5679 	nn->reclaim_str_hashtbl_size--;
5680 }
5681 
5682 void
5683 nfs4_release_reclaim(struct nfsd_net *nn)
5684 {
5685 	struct nfs4_client_reclaim *crp = NULL;
5686 	int i;
5687 
5688 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5689 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
5690 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
5691 			                struct nfs4_client_reclaim, cr_strhash);
5692 			nfs4_remove_reclaim_record(crp, nn);
5693 		}
5694 	}
5695 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
5696 }
5697 
5698 /* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
5700 struct nfs4_client_reclaim *
5701 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
5702 {
5703 	unsigned int strhashval;
5704 	struct nfs4_client_reclaim *crp = NULL;
5705 
5706 	dprintk("NFSD: nfsd4_find_reclaim_client for recdir %s\n", recdir);
5707 
5708 	strhashval = clientstr_hashval(recdir);
5709 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
5710 		if (same_name(crp->cr_recdir, recdir)) {
5711 			return crp;
5712 		}
5713 	}
5714 	return NULL;
5715 }
5716 
5717 /*
5718  * Called from OPEN. Look for clientid in reclaim list.
5719  */
5720 __be32
5721 nfs4_check_open_reclaim(clientid_t *clid,
5722 		struct nfsd4_compound_state *cstate,
5723 		struct nfsd_net *nn)
5724 {
5725 	__be32 status;
5726 
5727 	/* find clientid in conf_id_hashtbl */
5728 	status = lookup_clientid(clid, cstate, nn);
5729 	if (status)
5730 		return nfserr_reclaim_bad;
5731 
5732 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
5733 		return nfserr_no_grace;
5734 
5735 	if (nfsd4_client_record_check(cstate->clp))
5736 		return nfserr_reclaim_bad;
5737 
5738 	return nfs_ok;
5739 }
5740 
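/*
 * Editor's summary: an open reclaim can be refused in three ways.  An
 * unknown clientid and a failed client record check both come back as
 * nfserr_reclaim_bad, while a client that already sent RECLAIM_COMPLETE
 * gets nfserr_no_grace, since it promised not to reclaim anything else.
 */
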
5741 #ifdef CONFIG_NFSD_FAULT_INJECTION
5742 static inline void
5743 put_client(struct nfs4_client *clp)
5744 {
5745 	atomic_dec(&clp->cl_refcount);
5746 }
5747 
5748 static struct nfs4_client *
5749 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
5750 {
5751 	struct nfs4_client *clp;
5752 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5753 					  nfsd_net_id);
5754 
5755 	if (!nfsd_netns_ready(nn))
5756 		return NULL;
5757 
5758 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5759 		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
5760 			return clp;
5761 	}
5762 	return NULL;
5763 }
5764 
5765 u64
5766 nfsd_inject_print_clients(void)
5767 {
5768 	struct nfs4_client *clp;
5769 	u64 count = 0;
5770 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5771 					  nfsd_net_id);
5772 	char buf[INET6_ADDRSTRLEN];
5773 
5774 	if (!nfsd_netns_ready(nn))
5775 		return 0;
5776 
5777 	spin_lock(&nn->client_lock);
5778 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5779 		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5780 		pr_info("NFS Client: %s\n", buf);
5781 		++count;
5782 	}
5783 	spin_unlock(&nn->client_lock);
5784 
5785 	return count;
5786 }
5787 
5788 u64
5789 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
5790 {
5791 	u64 count = 0;
5792 	struct nfs4_client *clp;
5793 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5794 					  nfsd_net_id);
5795 
5796 	if (!nfsd_netns_ready(nn))
5797 		return count;
5798 
5799 	spin_lock(&nn->client_lock);
5800 	clp = nfsd_find_client(addr, addr_size);
5801 	if (clp) {
5802 		if (mark_client_expired_locked(clp) == nfs_ok)
5803 			++count;
5804 		else
5805 			clp = NULL;
5806 	}
5807 	spin_unlock(&nn->client_lock);
5808 
5809 	if (clp)
5810 		expire_client(clp);
5811 
5812 	return count;
5813 }
5814 
5815 u64
5816 nfsd_inject_forget_clients(u64 max)
5817 {
5818 	u64 count = 0;
5819 	struct nfs4_client *clp, *next;
5820 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5821 						nfsd_net_id);
5822 	LIST_HEAD(reaplist);
5823 
5824 	if (!nfsd_netns_ready(nn))
5825 		return count;
5826 
5827 	spin_lock(&nn->client_lock);
5828 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
5829 		if (mark_client_expired_locked(clp) == nfs_ok) {
5830 			list_add(&clp->cl_lru, &reaplist);
5831 			if (max != 0 && ++count >= max)
5832 				break;
5833 		}
5834 	}
5835 	spin_unlock(&nn->client_lock);
5836 
5837 	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
5838 		expire_client(clp);
5839 
5840 	return count;
5841 }
5842 
5843 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
5844 			     const char *type)
5845 {
5846 	char buf[INET6_ADDRSTRLEN];
5847 	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5848 	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
5849 }
5850 
5851 static void
5852 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
5853 			     struct list_head *collect)
5854 {
5855 	struct nfs4_client *clp = lst->st_stid.sc_client;
5856 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5857 					  nfsd_net_id);
5858 
5859 	if (!collect)
5860 		return;
5861 
5862 	lockdep_assert_held(&nn->client_lock);
5863 	atomic_inc(&clp->cl_refcount);
5864 	list_add(&lst->st_locks, collect);
5865 }
5866 
5867 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
5868 				    struct list_head *collect,
5869 				    void (*func)(struct nfs4_ol_stateid *))
5870 {
5871 	struct nfs4_openowner *oop;
5872 	struct nfs4_ol_stateid *stp, *st_next;
5873 	struct nfs4_ol_stateid *lst, *lst_next;
5874 	u64 count = 0;
5875 
5876 	spin_lock(&clp->cl_lock);
5877 	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
5878 		list_for_each_entry_safe(stp, st_next,
5879 				&oop->oo_owner.so_stateids, st_perstateowner) {
5880 			list_for_each_entry_safe(lst, lst_next,
5881 					&stp->st_locks, st_locks) {
5882 				if (func) {
5883 					func(lst);
5884 					nfsd_inject_add_lock_to_list(lst,
5885 								collect);
5886 				}
5887 				++count;
5888 				/*
5889 				 * Despite the fact that these functions deal
5890 				 * with 64-bit integers for "count", we must
5891 				 * ensure that it doesn't blow up the
5892 				 * clp->cl_refcount. Throw a warning if we
5893 				 * start to approach INT_MAX here.
5894 				 */
5895 				WARN_ON_ONCE(count == (INT_MAX / 2));
5896 				if (count == max)
5897 					goto out;
5898 			}
5899 		}
5900 	}
5901 out:
5902 	spin_unlock(&clp->cl_lock);
5903 
5904 	return count;
5905 }
5906 
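/*
 * Editor's note: in these fault-injection walkers a max of 0 means "no
 * limit" -- count has already been incremented by the time it is
 * compared, so "count == max" can never fire for max == 0.  Every
 * collected stateid also pins its client through cl_refcount, which is
 * what the INT_MAX/2 warning above is guarding against.
 */
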
5907 static u64
5908 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
5909 			  u64 max)
5910 {
5911 	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
5912 }
5913 
5914 static u64
5915 nfsd_print_client_locks(struct nfs4_client *clp)
5916 {
5917 	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
5918 	nfsd_print_count(clp, count, "locked files");
5919 	return count;
5920 }
5921 
5922 u64
5923 nfsd_inject_print_locks(void)
5924 {
5925 	struct nfs4_client *clp;
5926 	u64 count = 0;
5927 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5928 						nfsd_net_id);
5929 
5930 	if (!nfsd_netns_ready(nn))
5931 		return 0;
5932 
5933 	spin_lock(&nn->client_lock);
5934 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
5935 		count += nfsd_print_client_locks(clp);
5936 	spin_unlock(&nn->client_lock);
5937 
5938 	return count;
5939 }
5940 
5941 static void
5942 nfsd_reap_locks(struct list_head *reaplist)
5943 {
5944 	struct nfs4_client *clp;
5945 	struct nfs4_ol_stateid *stp, *next;
5946 
5947 	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
5948 		list_del_init(&stp->st_locks);
5949 		clp = stp->st_stid.sc_client;
5950 		nfs4_put_stid(&stp->st_stid);
5951 		put_client(clp);
5952 	}
5953 }
5954 
5955 u64
5956 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
5957 {
5958 	unsigned int count = 0;
5959 	struct nfs4_client *clp;
5960 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5961 						nfsd_net_id);
5962 	LIST_HEAD(reaplist);
5963 
5964 	if (!nfsd_netns_ready(nn))
5965 		return count;
5966 
5967 	spin_lock(&nn->client_lock);
5968 	clp = nfsd_find_client(addr, addr_size);
5969 	if (clp)
5970 		count = nfsd_collect_client_locks(clp, &reaplist, 0);
5971 	spin_unlock(&nn->client_lock);
5972 	nfsd_reap_locks(&reaplist);
5973 	return count;
5974 }
5975 
5976 u64
5977 nfsd_inject_forget_locks(u64 max)
5978 {
5979 	u64 count = 0;
5980 	struct nfs4_client *clp;
5981 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5982 						nfsd_net_id);
5983 	LIST_HEAD(reaplist);
5984 
5985 	if (!nfsd_netns_ready(nn))
5986 		return count;
5987 
5988 	spin_lock(&nn->client_lock);
5989 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5990 		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
5991 		if (max != 0 && count >= max)
5992 			break;
5993 	}
5994 	spin_unlock(&nn->client_lock);
5995 	nfsd_reap_locks(&reaplist);
5996 	return count;
5997 }
5998 
5999 static u64
6000 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6001 			      struct list_head *collect,
6002 			      void (*func)(struct nfs4_openowner *))
6003 {
6004 	struct nfs4_openowner *oop, *next;
6005 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6006 						nfsd_net_id);
6007 	u64 count = 0;
6008 
6009 	lockdep_assert_held(&nn->client_lock);
6010 
6011 	spin_lock(&clp->cl_lock);
6012 	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6013 		if (func) {
6014 			func(oop);
6015 			if (collect) {
6016 				atomic_inc(&clp->cl_refcount);
6017 				list_add(&oop->oo_perclient, collect);
6018 			}
6019 		}
6020 		++count;
6021 		/*
6022 		 * Despite the fact that these functions deal with
6023 		 * 64-bit integers for "count", we must ensure that
6024 		 * it doesn't blow up the clp->cl_refcount. Throw a
6025 		 * warning if we start to approach INT_MAX here.
6026 		 */
6027 		WARN_ON_ONCE(count == (INT_MAX / 2));
6028 		if (count == max)
6029 			break;
6030 	}
6031 	spin_unlock(&clp->cl_lock);
6032 
6033 	return count;
6034 }
6035 
6036 static u64
6037 nfsd_print_client_openowners(struct nfs4_client *clp)
6038 {
6039 	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6040 
6041 	nfsd_print_count(clp, count, "openowners");
6042 	return count;
6043 }
6044 
6045 static u64
6046 nfsd_collect_client_openowners(struct nfs4_client *clp,
6047 			       struct list_head *collect, u64 max)
6048 {
6049 	return nfsd_foreach_client_openowner(clp, max, collect,
6050 						unhash_openowner_locked);
6051 }
6052 
6053 u64
6054 nfsd_inject_print_openowners(void)
6055 {
6056 	struct nfs4_client *clp;
6057 	u64 count = 0;
6058 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6059 						nfsd_net_id);
6060 
6061 	if (!nfsd_netns_ready(nn))
6062 		return 0;
6063 
6064 	spin_lock(&nn->client_lock);
6065 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6066 		count += nfsd_print_client_openowners(clp);
6067 	spin_unlock(&nn->client_lock);
6068 
6069 	return count;
6070 }
6071 
6072 static void
6073 nfsd_reap_openowners(struct list_head *reaplist)
6074 {
6075 	struct nfs4_client *clp;
6076 	struct nfs4_openowner *oop, *next;
6077 
6078 	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6079 		list_del_init(&oop->oo_perclient);
6080 		clp = oop->oo_owner.so_client;
6081 		release_openowner(oop);
6082 		put_client(clp);
6083 	}
6084 }
6085 
6086 u64
6087 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6088 				     size_t addr_size)
6089 {
6090 	unsigned int count = 0;
6091 	struct nfs4_client *clp;
6092 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6093 						nfsd_net_id);
6094 	LIST_HEAD(reaplist);
6095 
6096 	if (!nfsd_netns_ready(nn))
6097 		return count;
6098 
6099 	spin_lock(&nn->client_lock);
6100 	clp = nfsd_find_client(addr, addr_size);
6101 	if (clp)
6102 		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6103 	spin_unlock(&nn->client_lock);
6104 	nfsd_reap_openowners(&reaplist);
6105 	return count;
6106 }
6107 
6108 u64
6109 nfsd_inject_forget_openowners(u64 max)
6110 {
6111 	u64 count = 0;
6112 	struct nfs4_client *clp;
6113 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6114 						nfsd_net_id);
6115 	LIST_HEAD(reaplist);
6116 
6117 	if (!nfsd_netns_ready(nn))
6118 		return count;
6119 
6120 	spin_lock(&nn->client_lock);
6121 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6122 		count += nfsd_collect_client_openowners(clp, &reaplist,
6123 							max - count);
6124 		if (max != 0 && count >= max)
6125 			break;
6126 	}
6127 	spin_unlock(&nn->client_lock);
6128 	nfsd_reap_openowners(&reaplist);
6129 	return count;
6130 }
6131 
6132 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6133 				     struct list_head *victims)
6134 {
6135 	struct nfs4_delegation *dp, *next;
6136 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6137 						nfsd_net_id);
6138 	u64 count = 0;
6139 
6140 	lockdep_assert_held(&nn->client_lock);
6141 
6142 	spin_lock(&state_lock);
6143 	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6144 		if (victims) {
6145 			/*
6146 			 * It's not safe to mess with delegations that have a
6147 			 * non-zero dl_time. They might have already been broken
6148 			 * and could be processed by the laundromat outside of
6149 			 * the state_lock. Just leave them be.
6150 			 */
6151 			if (dp->dl_time != 0)
6152 				continue;
6153 
6154 			atomic_inc(&clp->cl_refcount);
6155 			unhash_delegation_locked(dp);
6156 			list_add(&dp->dl_recall_lru, victims);
6157 		}
6158 		++count;
6159 		/*
6160 		 * Despite the fact that these functions deal with
6161 		 * 64-bit integers for "count", we must ensure that
6162 		 * it doesn't blow up the clp->cl_refcount. Throw a
6163 		 * warning if we start to approach INT_MAX here.
6164 		 */
6165 		WARN_ON_ONCE(count == (INT_MAX / 2));
6166 		if (count == max)
6167 			break;
6168 	}
6169 	spin_unlock(&state_lock);
6170 	return count;
6171 }
6172 
6173 static u64
6174 nfsd_print_client_delegations(struct nfs4_client *clp)
6175 {
6176 	u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6177 
6178 	nfsd_print_count(clp, count, "delegations");
6179 	return count;
6180 }
6181 
6182 u64
6183 nfsd_inject_print_delegations(void)
6184 {
6185 	struct nfs4_client *clp;
6186 	u64 count = 0;
6187 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6188 						nfsd_net_id);
6189 
6190 	if (!nfsd_netns_ready(nn))
6191 		return 0;
6192 
6193 	spin_lock(&nn->client_lock);
6194 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6195 		count += nfsd_print_client_delegations(clp);
6196 	spin_unlock(&nn->client_lock);
6197 
6198 	return count;
6199 }
6200 
6201 static void
6202 nfsd_forget_delegations(struct list_head *reaplist)
6203 {
6204 	struct nfs4_client *clp;
6205 	struct nfs4_delegation *dp, *next;
6206 
6207 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6208 		list_del_init(&dp->dl_recall_lru);
6209 		clp = dp->dl_stid.sc_client;
6210 		revoke_delegation(dp);
6211 		put_client(clp);
6212 	}
6213 }
6214 
6215 u64
6216 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6217 				      size_t addr_size)
6218 {
6219 	u64 count = 0;
6220 	struct nfs4_client *clp;
6221 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6222 						nfsd_net_id);
6223 	LIST_HEAD(reaplist);
6224 
6225 	if (!nfsd_netns_ready(nn))
6226 		return count;
6227 
6228 	spin_lock(&nn->client_lock);
6229 	clp = nfsd_find_client(addr, addr_size);
6230 	if (clp)
6231 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6232 	spin_unlock(&nn->client_lock);
6233 
6234 	nfsd_forget_delegations(&reaplist);
6235 	return count;
6236 }
6237 
6238 u64
6239 nfsd_inject_forget_delegations(u64 max)
6240 {
6241 	u64 count = 0;
6242 	struct nfs4_client *clp;
6243 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6244 						nfsd_net_id);
6245 	LIST_HEAD(reaplist);
6246 
6247 	if (!nfsd_netns_ready(nn))
6248 		return count;
6249 
6250 	spin_lock(&nn->client_lock);
6251 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6252 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6253 		if (max != 0 && count >= max)
6254 			break;
6255 	}
6256 	spin_unlock(&nn->client_lock);
6257 	nfsd_forget_delegations(&reaplist);
6258 	return count;
6259 }
6260 
6261 static void
6262 nfsd_recall_delegations(struct list_head *reaplist)
6263 {
6264 	struct nfs4_client *clp;
6265 	struct nfs4_delegation *dp, *next;
6266 
6267 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6268 		list_del_init(&dp->dl_recall_lru);
6269 		clp = dp->dl_stid.sc_client;
6270 		/*
6271 		 * We skipped all entries that had a zero dl_time before,
6272 		 * so we can now reset the dl_time back to 0. If a delegation
6273 		 * break comes in now, then it won't make any difference since
6274 		 * we're recalling it either way.
6275 		 */
6276 		spin_lock(&state_lock);
6277 		dp->dl_time = 0;
6278 		spin_unlock(&state_lock);
6279 		nfsd_break_one_deleg(dp);
6280 		put_client(clp);
6281 	}
6282 }
6283 
6284 u64
6285 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
6286 				      size_t addr_size)
6287 {
6288 	u64 count = 0;
6289 	struct nfs4_client *clp;
6290 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6291 						nfsd_net_id);
6292 	LIST_HEAD(reaplist);
6293 
6294 	if (!nfsd_netns_ready(nn))
6295 		return count;
6296 
6297 	spin_lock(&nn->client_lock);
6298 	clp = nfsd_find_client(addr, addr_size);
6299 	if (clp)
6300 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6301 	spin_unlock(&nn->client_lock);
6302 
6303 	nfsd_recall_delegations(&reaplist);
6304 	return count;
6305 }
6306 
6307 u64
6308 nfsd_inject_recall_delegations(u64 max)
6309 {
6310 	u64 count = 0;
6311 	struct nfs4_client *clp, *next;
6312 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6313 						nfsd_net_id);
6314 	LIST_HEAD(reaplist);
6315 
6316 	if (!nfsd_netns_ready(nn))
6317 		return count;
6318 
6319 	spin_lock(&nn->client_lock);
6320 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6321 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6322 		if (max != 0 && count >= max)
6323 			break;
6324 	}
6325 	spin_unlock(&nn->client_lock);
6326 	nfsd_recall_delegations(&reaplist);
6327 	return count;
6328 }
6329 #endif /* CONFIG_NFSD_FAULT_INJECTION */
6330 
6331 /*
6332  * Since the lifetime of a delegation isn't limited to that of an open, a
6333  * client may quite reasonably hang on to a delegation as long as it has
6334  * the inode cached.  This becomes an obvious problem the first time a
6335  * client's inode cache approaches the size of the server's total memory.
6336  *
6337  * For now we avoid this problem by imposing a hard limit on the number
6338  * of delegations, which varies according to the server's memory size.
6339  */
6340 static void
6341 set_max_delegations(void)
6342 {
6343 	/*
6344 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
6345 	 * estimates suggest that in the worst case (where every delegation
6346 	 * is for a different inode), a delegation could take about 1.5K,
6347 	 * giving a worst case usage of about 6% of memory.
6348 	 */
6349 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
6350 }
6351 
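/*
 * Editor's check of the shift arithmetic, assuming PAGE_SHIFT == 12
 * (4KiB pages): 20 - 2 - 12 == 6, so the limit is pages / 64.  A
 * megabyte is 256 such pages, and 256 / 64 == 4 -- the advertised four
 * delegations per megabyte of low memory.
 */
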
6352 static int nfs4_state_create_net(struct net *net)
6353 {
6354 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6355 	int i;
6356 
6357 	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6358 			CLIENT_HASH_SIZE, GFP_KERNEL);
6359 	if (!nn->conf_id_hashtbl)
6360 		goto err;
6361 	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6362 			CLIENT_HASH_SIZE, GFP_KERNEL);
6363 	if (!nn->unconf_id_hashtbl)
6364 		goto err_unconf_id;
6365 	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
6366 			SESSION_HASH_SIZE, GFP_KERNEL);
6367 	if (!nn->sessionid_hashtbl)
6368 		goto err_sessionid;
6369 
6370 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6371 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
6372 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
6373 	}
6374 	for (i = 0; i < SESSION_HASH_SIZE; i++)
6375 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
6376 	nn->conf_name_tree = RB_ROOT;
6377 	nn->unconf_name_tree = RB_ROOT;
6378 	INIT_LIST_HEAD(&nn->client_lru);
6379 	INIT_LIST_HEAD(&nn->close_lru);
6380 	INIT_LIST_HEAD(&nn->del_recall_lru);
6381 	spin_lock_init(&nn->client_lock);
6382 
6383 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6384 	get_net(net);
6385 
6386 	return 0;
6387 
6388 err_sessionid:
6389 	kfree(nn->unconf_id_hashtbl);
6390 err_unconf_id:
6391 	kfree(nn->conf_id_hashtbl);
6392 err:
6393 	return -ENOMEM;
6394 }
6395 
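/*
 * Editor's note: the unwinding above is the usual kernel goto ladder --
 * each label frees exactly what was allocated before the failing step,
 * in reverse order: a failed sessionid_hashtbl allocation frees both id
 * hashtables, a failed unconf_id_hashtbl allocation frees only
 * conf_id_hashtbl.
 */
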
6396 static void
6397 nfs4_state_destroy_net(struct net *net)
6398 {
6399 	int i;
6400 	struct nfs4_client *clp = NULL;
6401 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6402 
6403 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6404 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
6405 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6406 			destroy_client(clp);
6407 		}
6408 	}
6409 
6410 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6411 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
6412 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6413 			destroy_client(clp);
6414 		}
6415 	}
6416 
6417 	kfree(nn->sessionid_hashtbl);
6418 	kfree(nn->unconf_id_hashtbl);
6419 	kfree(nn->conf_id_hashtbl);
6420 	put_net(net);
6421 }
6422 
6423 int
6424 nfs4_state_start_net(struct net *net)
6425 {
6426 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6427 	int ret;
6428 
6429 	ret = nfs4_state_create_net(net);
6430 	if (ret)
6431 		return ret;
6432 	nn->boot_time = get_seconds();
6433 	nn->grace_ended = false;
6434 	locks_start_grace(net, &nn->nfsd4_manager);
6435 	nfsd4_client_tracking_init(net);
6436 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
6437 	       nn->nfsd4_grace, net);
6438 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
6439 	return 0;
6440 }
6441 
6442 /* initialization to perform when the nfsd service is started: */
6443 
6444 int
6445 nfs4_state_start(void)
6446 {
6447 	int ret;
6448 
6449 	ret = set_callback_cred();
6450 	if (ret)
6451 		return -ENOMEM;
6452 	laundry_wq = create_singlethread_workqueue("nfsd4");
6453 	if (laundry_wq == NULL) {
6454 		ret = -ENOMEM;
6455 		goto out_recovery;
6456 	}
6457 	ret = nfsd4_create_callback_queue();
6458 	if (ret)
6459 		goto out_free_laundry;
6460 
6461 	set_max_delegations();
6462 
6463 	return 0;
6464 
6465 out_free_laundry:
6466 	destroy_workqueue(laundry_wq);
6467 out_recovery:
6468 	return ret;
6469 }
6470 
6471 void
6472 nfs4_state_shutdown_net(struct net *net)
6473 {
6474 	struct nfs4_delegation *dp = NULL;
6475 	struct list_head *pos, *next, reaplist;
6476 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6477 
6478 	cancel_delayed_work_sync(&nn->laundromat_work);
6479 	locks_end_grace(&nn->nfsd4_manager);
6480 
6481 	INIT_LIST_HEAD(&reaplist);
6482 	spin_lock(&state_lock);
6483 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
6484 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6485 		unhash_delegation_locked(dp);
6486 		list_add(&dp->dl_recall_lru, &reaplist);
6487 	}
6488 	spin_unlock(&state_lock);
6489 	list_for_each_safe(pos, next, &reaplist) {
6490 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6491 		list_del_init(&dp->dl_recall_lru);
6492 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6493 		nfs4_put_stid(&dp->dl_stid);
6494 	}
6495 
6496 	nfsd4_client_tracking_exit(net);
6497 	nfs4_state_destroy_net(net);
6498 }
6499 
6500 void
6501 nfs4_state_shutdown(void)
6502 {
6503 	destroy_workqueue(laundry_wq);
6504 	nfsd4_destroy_callback_queue();
6505 }
6506 
6507 static void
6508 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6509 {
6510 	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
6511 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
6512 }
6513 
6514 static void
6515 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6516 {
6517 	if (cstate->minorversion) {
6518 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
6519 		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6520 	}
6521 }
6522 
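/*
 * Editor's note: get_stateid()/put_stateid() together implement the
 * NFSv4.1 "current stateid": state-mutating ops save their result
 * stateid in the compound state, and a later op in the same compound
 * that carries the special currentstateid value has it substituted
 * here.  put_stateid() is gated on cstate->minorversion because v4.0
 * compounds have no such notion.
 */
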
6523 void
6524 clear_current_stateid(struct nfsd4_compound_state *cstate)
6525 {
6526 	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6527 }
6528 
6529 /*
6530  * functions to set current state id
6531  */
6532 void
6533 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6534 {
6535 	put_stateid(cstate, &odp->od_stateid);
6536 }
6537 
6538 void
6539 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
6540 {
6541 	put_stateid(cstate, &open->op_stateid);
6542 }
6543 
6544 void
6545 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6546 {
6547 	put_stateid(cstate, &close->cl_stateid);
6548 }
6549 
6550 void
6551 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
6552 {
6553 	put_stateid(cstate, &lock->lk_resp_stateid);
6554 }
6555 
6556 /*
6557  * functions to consume current state id
6558  */
6559 
6560 void
6561 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6562 {
6563 	get_stateid(cstate, &odp->od_stateid);
6564 }
6565 
6566 void
6567 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
6568 {
6569 	get_stateid(cstate, &drp->dr_stateid);
6570 }
6571 
6572 void
6573 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
6574 {
6575 	get_stateid(cstate, &fsp->fr_stateid);
6576 }
6577 
6578 void
6579 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
6580 {
6581 	get_stateid(cstate, &setattr->sa_stateid);
6582 }
6583 
6584 void
6585 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6586 {
6587 	get_stateid(cstate, &close->cl_stateid);
6588 }
6589 
6590 void
6591 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
6592 {
6593 	get_stateid(cstate, &locku->lu_stateid);
6594 }
6595 
6596 void
6597 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
6598 {
6599 	get_stateid(cstate, &read->rd_stateid);
6600 }
6601 
6602 void
6603 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
6604 {
6605 	get_stateid(cstate, &write->wr_stateid);
6606 }
6607