xref: /openbmc/linux/fs/nfsd/nfs4state.c (revision bc5aa3a0)
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49 
50 #include "netns.h"
51 #include "pnfs.h"
52 
53 #define NFSDDBG_FACILITY                NFSDDBG_PROC
54 
55 #define all_ones {{~0,~0},~0}
56 static const stateid_t one_stateid = {
57 	.si_generation = ~0,
58 	.si_opaque = all_ones,
59 };
60 static const stateid_t zero_stateid = {
61 	/* all fields zero */
62 };
63 static const stateid_t currentstateid = {
64 	.si_generation = 1,
65 };
66 
67 static u64 current_sessionid = 1;
68 
69 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
70 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
71 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
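/*
 * These match the protocol-defined special stateids (RFC 7530/5661):
 * the all-zero "anonymous" stateid, the all-ones stateid (used e.g. to
 * bypass READ share checks), and, for 4.1, the "current stateid" value
 * of seqid 1 with an all-zero "other" field.  The macros simply compare
 * all 16 bytes of the stateid with memcmp().
 */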
72 
73 /* forward declarations */
74 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
75 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
76 
77 /* Locking: */
78 
79 /*
80  * Currently used for the del_recall_lru and file hash table.  In an
81  * effort to decrease the scope of the client_mutex, this spinlock may
82  * eventually cover more:
83  */
84 static DEFINE_SPINLOCK(state_lock);
85 
86 /*
87  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
88  * the refcount on the open stateid to drop.
89  */
90 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
91 
92 static struct kmem_cache *openowner_slab;
93 static struct kmem_cache *lockowner_slab;
94 static struct kmem_cache *file_slab;
95 static struct kmem_cache *stateid_slab;
96 static struct kmem_cache *deleg_slab;
97 static struct kmem_cache *odstate_slab;
98 
99 static void free_session(struct nfsd4_session *);
100 
101 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
102 
103 static bool is_session_dead(struct nfsd4_session *ses)
104 {
105 	return ses->se_flags & NFS4_SESSION_DEAD;
106 }
107 
108 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
109 {
110 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
111 		return nfserr_jukebox;
112 	ses->se_flags |= NFS4_SESSION_DEAD;
113 	return nfs_ok;
114 }
115 
116 static bool is_client_expired(struct nfs4_client *clp)
117 {
118 	return clp->cl_time == 0;
119 }
120 
121 static __be32 get_client_locked(struct nfs4_client *clp)
122 {
123 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
124 
125 	lockdep_assert_held(&nn->client_lock);
126 
127 	if (is_client_expired(clp))
128 		return nfserr_expired;
129 	atomic_inc(&clp->cl_refcount);
130 	return nfs_ok;
131 }
132 
133 /* must be called under the client_lock */
134 static inline void
135 renew_client_locked(struct nfs4_client *clp)
136 {
137 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
138 
139 	if (is_client_expired(clp)) {
140 		WARN_ON(1);
141 		printk("%s: client (clientid %08x/%08x) already expired\n",
142 			__func__,
143 			clp->cl_clientid.cl_boot,
144 			clp->cl_clientid.cl_id);
145 		return;
146 	}
147 
148 	dprintk("renewing client (clientid %08x/%08x)\n",
149 			clp->cl_clientid.cl_boot,
150 			clp->cl_clientid.cl_id);
151 	list_move_tail(&clp->cl_lru, &nn->client_lru);
152 	clp->cl_time = get_seconds();
153 }
154 
155 static void put_client_renew_locked(struct nfs4_client *clp)
156 {
157 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
158 
159 	lockdep_assert_held(&nn->client_lock);
160 
161 	if (!atomic_dec_and_test(&clp->cl_refcount))
162 		return;
163 	if (!is_client_expired(clp))
164 		renew_client_locked(clp);
165 }
166 
167 static void put_client_renew(struct nfs4_client *clp)
168 {
169 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
170 
171 	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
172 		return;
173 	if (!is_client_expired(clp))
174 		renew_client_locked(clp);
175 	spin_unlock(&nn->client_lock);
176 }
177 
178 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
179 {
180 	__be32 status;
181 
182 	if (is_session_dead(ses))
183 		return nfserr_badsession;
184 	status = get_client_locked(ses->se_client);
185 	if (status)
186 		return status;
187 	atomic_inc(&ses->se_ref);
188 	return nfs_ok;
189 }
190 
191 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
192 {
193 	struct nfs4_client *clp = ses->se_client;
194 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
195 
196 	lockdep_assert_held(&nn->client_lock);
197 
198 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
199 		free_session(ses);
200 	put_client_renew_locked(clp);
201 }
202 
203 static void nfsd4_put_session(struct nfsd4_session *ses)
204 {
205 	struct nfs4_client *clp = ses->se_client;
206 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
207 
208 	spin_lock(&nn->client_lock);
209 	nfsd4_put_session_locked(ses);
210 	spin_unlock(&nn->client_lock);
211 }
212 
213 static inline struct nfs4_stateowner *
214 nfs4_get_stateowner(struct nfs4_stateowner *sop)
215 {
216 	atomic_inc(&sop->so_count);
217 	return sop;
218 }
219 
220 static int
221 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
222 {
223 	return (sop->so_owner.len == owner->len) &&
224 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
225 }
226 
227 static struct nfs4_openowner *
228 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
229 			struct nfs4_client *clp)
230 {
231 	struct nfs4_stateowner *so;
232 
233 	lockdep_assert_held(&clp->cl_lock);
234 
235 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
236 			    so_strhash) {
237 		if (!so->so_is_open_owner)
238 			continue;
239 		if (same_owner_str(so, &open->op_owner))
240 			return openowner(nfs4_get_stateowner(so));
241 	}
242 	return NULL;
243 }
244 
245 static struct nfs4_openowner *
246 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
247 			struct nfs4_client *clp)
248 {
249 	struct nfs4_openowner *oo;
250 
251 	spin_lock(&clp->cl_lock);
252 	oo = find_openstateowner_str_locked(hashval, open, clp);
253 	spin_unlock(&clp->cl_lock);
254 	return oo;
255 }
256 
257 static inline u32
258 opaque_hashval(const void *ptr, int nbytes)
259 {
260 	unsigned char *cptr = (unsigned char *) ptr;
261 
262 	u32 x = 0;
263 	while (nbytes--) {
264 		x *= 37;
265 		x += *cptr++;
266 	}
267 	return x;
268 }
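/*
 * Worked example: opaque_hashval("ab", 2) computes
 * (0 * 37 + 'a') * 37 + 'b' = 97 * 37 + 98 = 3687; callers such as
 * ownerstr_hashval() below then mask this down to a bucket index
 * (3687 & OWNER_HASH_MASK == 103).
 */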
269 
270 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
271 {
272 	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
273 
274 	kmem_cache_free(file_slab, fp);
275 }
276 
277 void
278 put_nfs4_file(struct nfs4_file *fi)
279 {
280 	might_lock(&state_lock);
281 
282 	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
283 		hlist_del_rcu(&fi->fi_hash);
284 		spin_unlock(&state_lock);
285 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
286 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
287 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
288 	}
289 }
290 
291 static struct file *
292 __nfs4_get_fd(struct nfs4_file *f, int oflag)
293 {
294 	if (f->fi_fds[oflag])
295 		return get_file(f->fi_fds[oflag]);
296 	return NULL;
297 }
298 
299 static struct file *
300 find_writeable_file_locked(struct nfs4_file *f)
301 {
302 	struct file *ret;
303 
304 	lockdep_assert_held(&f->fi_lock);
305 
306 	ret = __nfs4_get_fd(f, O_WRONLY);
307 	if (!ret)
308 		ret = __nfs4_get_fd(f, O_RDWR);
309 	return ret;
310 }
311 
312 static struct file *
313 find_writeable_file(struct nfs4_file *f)
314 {
315 	struct file *ret;
316 
317 	spin_lock(&f->fi_lock);
318 	ret = find_writeable_file_locked(f);
319 	spin_unlock(&f->fi_lock);
320 
321 	return ret;
322 }
323 
324 static struct file *find_readable_file_locked(struct nfs4_file *f)
325 {
326 	struct file *ret;
327 
328 	lockdep_assert_held(&f->fi_lock);
329 
330 	ret = __nfs4_get_fd(f, O_RDONLY);
331 	if (!ret)
332 		ret = __nfs4_get_fd(f, O_RDWR);
333 	return ret;
334 }
335 
336 static struct file *
337 find_readable_file(struct nfs4_file *f)
338 {
339 	struct file *ret;
340 
341 	spin_lock(&f->fi_lock);
342 	ret = find_readable_file_locked(f);
343 	spin_unlock(&f->fi_lock);
344 
345 	return ret;
346 }
347 
348 struct file *
349 find_any_file(struct nfs4_file *f)
350 {
351 	struct file *ret;
352 
353 	spin_lock(&f->fi_lock);
354 	ret = __nfs4_get_fd(f, O_RDWR);
355 	if (!ret) {
356 		ret = __nfs4_get_fd(f, O_WRONLY);
357 		if (!ret)
358 			ret = __nfs4_get_fd(f, O_RDONLY);
359 	}
360 	spin_unlock(&f->fi_lock);
361 	return ret;
362 }
363 
364 static atomic_long_t num_delegations;
365 unsigned long max_delegations;
366 
367 /*
368  * Open owner state (share locks)
369  */
370 
371 /* hash tables for lock and open owners */
372 #define OWNER_HASH_BITS              8
373 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
374 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
375 
376 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
377 {
378 	unsigned int ret;
379 
380 	ret = opaque_hashval(ownername->data, ownername->len);
381 	return ret & OWNER_HASH_MASK;
382 }
383 
384 /* hash table for nfs4_file */
385 #define FILE_HASH_BITS                   8
386 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
387 
388 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
389 {
390 	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
391 }
392 
393 static unsigned int file_hashval(struct knfsd_fh *fh)
394 {
395 	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
396 }
397 
398 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
399 
400 static void
401 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
402 {
403 	lockdep_assert_held(&fp->fi_lock);
404 
405 	if (access & NFS4_SHARE_ACCESS_WRITE)
406 		atomic_inc(&fp->fi_access[O_WRONLY]);
407 	if (access & NFS4_SHARE_ACCESS_READ)
408 		atomic_inc(&fp->fi_access[O_RDONLY]);
409 }
410 
411 static __be32
412 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
413 {
414 	lockdep_assert_held(&fp->fi_lock);
415 
416 	/* Does this access mode make sense? */
417 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
418 		return nfserr_inval;
419 
420 	/* Does it conflict with a deny mode already set? */
421 	if ((access & fp->fi_share_deny) != 0)
422 		return nfserr_share_denied;
423 
424 	__nfs4_file_get_access(fp, access);
425 	return nfs_ok;
426 }
427 
428 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
429 {
430 	/* Common case is that there is no deny mode. */
431 	if (deny) {
432 		/* Does this deny mode make sense? */
433 		if (deny & ~NFS4_SHARE_DENY_BOTH)
434 			return nfserr_inval;
435 
436 		if ((deny & NFS4_SHARE_DENY_READ) &&
437 		    atomic_read(&fp->fi_access[O_RDONLY]))
438 			return nfserr_share_denied;
439 
440 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
441 		    atomic_read(&fp->fi_access[O_WRONLY]))
442 			return nfserr_share_denied;
443 	}
444 	return nfs_ok;
445 }
446 
447 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
448 {
449 	might_lock(&fp->fi_lock);
450 
451 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
452 		struct file *f1 = NULL;
453 		struct file *f2 = NULL;
454 
455 		swap(f1, fp->fi_fds[oflag]);
456 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
457 			swap(f2, fp->fi_fds[O_RDWR]);
458 		spin_unlock(&fp->fi_lock);
459 		if (f1)
460 			fput(f1);
461 		if (f2)
462 			fput(f2);
463 	}
464 }
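/*
 * When the last opener with the given access mode goes away,
 * __nfs4_file_put_access() drops fi_fds[oflag]; if the opposite access
 * count has also reached zero, the shared O_RDWR file is dropped too.
 * The swap()s move the struct file pointers out from under fi_lock so
 * the final fput()s happen after the lock is released.
 */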
465 
466 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
467 {
468 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
469 
470 	if (access & NFS4_SHARE_ACCESS_WRITE)
471 		__nfs4_file_put_access(fp, O_WRONLY);
472 	if (access & NFS4_SHARE_ACCESS_READ)
473 		__nfs4_file_put_access(fp, O_RDONLY);
474 }
475 
476 /*
477  * Allocate a new open/delegation state counter. This is needed for
478  * pNFS for proper return on close semantics.
479  *
480  * Note that we only allocate it for pNFS-enabled exports, otherwise
481  * all pointers to struct nfs4_clnt_odstate are always NULL.
482  */
483 static struct nfs4_clnt_odstate *
484 alloc_clnt_odstate(struct nfs4_client *clp)
485 {
486 	struct nfs4_clnt_odstate *co;
487 
488 	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
489 	if (co) {
490 		co->co_client = clp;
491 		atomic_set(&co->co_odcount, 1);
492 	}
493 	return co;
494 }
495 
496 static void
497 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
498 {
499 	struct nfs4_file *fp = co->co_file;
500 
501 	lockdep_assert_held(&fp->fi_lock);
502 	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
503 }
504 
505 static inline void
506 get_clnt_odstate(struct nfs4_clnt_odstate *co)
507 {
508 	if (co)
509 		atomic_inc(&co->co_odcount);
510 }
511 
512 static void
513 put_clnt_odstate(struct nfs4_clnt_odstate *co)
514 {
515 	struct nfs4_file *fp;
516 
517 	if (!co)
518 		return;
519 
520 	fp = co->co_file;
521 	if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
522 		list_del(&co->co_perfile);
523 		spin_unlock(&fp->fi_lock);
524 
525 		nfsd4_return_all_file_layouts(co->co_client, fp);
526 		kmem_cache_free(odstate_slab, co);
527 	}
528 }
529 
530 static struct nfs4_clnt_odstate *
531 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
532 {
533 	struct nfs4_clnt_odstate *co;
534 	struct nfs4_client *cl;
535 
536 	if (!new)
537 		return NULL;
538 
539 	cl = new->co_client;
540 
541 	spin_lock(&fp->fi_lock);
542 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
543 		if (co->co_client == cl) {
544 			get_clnt_odstate(co);
545 			goto out;
546 		}
547 	}
548 	co = new;
549 	co->co_file = fp;
550 	hash_clnt_odstate_locked(new);
551 out:
552 	spin_unlock(&fp->fi_lock);
553 	return co;
554 }
555 
556 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
557 					 struct kmem_cache *slab)
558 {
559 	struct nfs4_stid *stid;
560 	int new_id;
561 
562 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
563 	if (!stid)
564 		return NULL;
565 
566 	idr_preload(GFP_KERNEL);
567 	spin_lock(&cl->cl_lock);
568 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
569 	spin_unlock(&cl->cl_lock);
570 	idr_preload_end();
571 	if (new_id < 0)
572 		goto out_free;
573 	stid->sc_client = cl;
574 	stid->sc_stateid.si_opaque.so_id = new_id;
575 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
576 	/* Will be incremented before return to client: */
577 	atomic_set(&stid->sc_count, 1);
578 	spin_lock_init(&stid->sc_lock);
579 
580 	/*
581 	 * It shouldn't be a problem to reuse an opaque stateid value.
582 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
583 	 * example, a stray write retransmission could be accepted by
584 	 * the server when it should have been rejected.  Therefore,
585 	 * adopt a trick from the sctp code to attempt to maximize the
586 	 * amount of time until an id is reused, by ensuring they always
587 	 * "increase" (mod INT_MAX):
588 	 */
589 	return stid;
590 out_free:
591 	kmem_cache_free(slab, stid);
592 	return NULL;
593 }
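/*
 * A rough sketch of the resulting behaviour: idr_alloc_cyclic() hands
 * out "other" values 0, 1, 2, ... for a client's successive stateids,
 * wrapping back to the lowest free id only after reaching INT_MAX, so a
 * freed id is not reused until the whole id space has been cycled
 * through.
 */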
594 
595 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
596 {
597 	struct nfs4_stid *stid;
598 	struct nfs4_ol_stateid *stp;
599 
600 	stid = nfs4_alloc_stid(clp, stateid_slab);
601 	if (!stid)
602 		return NULL;
603 
604 	stp = openlockstateid(stid);
605 	stp->st_stid.sc_free = nfs4_free_ol_stateid;
606 	return stp;
607 }
608 
609 static void nfs4_free_deleg(struct nfs4_stid *stid)
610 {
611 	kmem_cache_free(deleg_slab, stid);
612 	atomic_long_dec(&num_delegations);
613 }
614 
615 /*
616  * When we recall a delegation, we should be careful not to hand it
617  * out again straight away.
618  * To ensure this we keep a pair of bloom filters ('new' and 'old')
619  * in which the filehandles of recalled delegations are "stored".
620  * If a filehandle appears in either filter, a delegation is blocked.
621  * When a delegation is recalled, the filehandle is stored in the "new"
622  * filter.
623  * Every 30 seconds we swap the filters and clear the "new" one,
624  * unless both are empty of course.
625  *
626  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
627  * Each filter is 256 bits.  We hash the filehandle to a 32-bit value and use the
628  *
629  * 'blocked_delegations_lock', which is always taken in block_delegations(),
630  * is used to manage concurrent access.  Testing does not need the lock
631  * except when swapping the two filters.
632  */
633 static DEFINE_SPINLOCK(blocked_delegations_lock);
634 static struct bloom_pair {
635 	int	entries, old_entries;
636 	time_t	swap_time;
637 	int	new; /* index into 'set' */
638 	DECLARE_BITMAP(set[2], 256);
639 } blocked_delegations;
640 
641 static int delegation_blocked(struct knfsd_fh *fh)
642 {
643 	u32 hash;
644 	struct bloom_pair *bd = &blocked_delegations;
645 
646 	if (bd->entries == 0)
647 		return 0;
648 	if (seconds_since_boot() - bd->swap_time > 30) {
649 		spin_lock(&blocked_delegations_lock);
650 		if (seconds_since_boot() - bd->swap_time > 30) {
651 			bd->entries -= bd->old_entries;
652 			bd->old_entries = bd->entries;
653 			memset(bd->set[bd->new], 0,
654 			       sizeof(bd->set[0]));
655 			bd->new = 1-bd->new;
656 			bd->swap_time = seconds_since_boot();
657 		}
658 		spin_unlock(&blocked_delegations_lock);
659 	}
660 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
661 	if (test_bit(hash&255, bd->set[0]) &&
662 	    test_bit((hash>>8)&255, bd->set[0]) &&
663 	    test_bit((hash>>16)&255, bd->set[0]))
664 		return 1;
665 
666 	if (test_bit(hash&255, bd->set[1]) &&
667 	    test_bit((hash>>8)&255, bd->set[1]) &&
668 	    test_bit((hash>>16)&255, bd->set[1]))
669 		return 1;
670 
671 	return 0;
672 }
673 
674 static void block_delegations(struct knfsd_fh *fh)
675 {
676 	u32 hash;
677 	struct bloom_pair *bd = &blocked_delegations;
678 
679 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
680 
681 	spin_lock(&blocked_delegations_lock);
682 	__set_bit(hash&255, bd->set[bd->new]);
683 	__set_bit((hash>>8)&255, bd->set[bd->new]);
684 	__set_bit((hash>>16)&255, bd->set[bd->new]);
685 	if (bd->entries == 0)
686 		bd->swap_time = seconds_since_boot();
687 	bd->entries += 1;
688 	spin_unlock(&blocked_delegations_lock);
689 }
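/*
 * Worked example (illustrative hash value): a recalled filehandle that
 * jhash()es to 0x00345678 has bits 0x78 (120), 0x56 (86) and 0x34 (52)
 * set in the "new" filter by block_delegations(); a subsequent
 * delegation_blocked() on the same filehandle finds all three bits set
 * and blocks the delegation until the entry ages out of both filters.
 */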
690 
691 static struct nfs4_delegation *
692 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
693 		 struct nfs4_clnt_odstate *odstate)
694 {
695 	struct nfs4_delegation *dp;
696 	long n;
697 
698 	dprintk("NFSD alloc_init_deleg\n");
699 	n = atomic_long_inc_return(&num_delegations);
700 	if (n < 0 || n > max_delegations)
701 		goto out_dec;
702 	if (delegation_blocked(&current_fh->fh_handle))
703 		goto out_dec;
704 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
705 	if (dp == NULL)
706 		goto out_dec;
707 
708 	dp->dl_stid.sc_free = nfs4_free_deleg;
709 	/*
710 	 * delegation seqids are never incremented.  The 4.1 special
711 	 * meaning of seqid 0 isn't really relevant here, but avoid 0
712 	 * anyway just for consistency and use 1:
713 	 */
714 	dp->dl_stid.sc_stateid.si_generation = 1;
715 	INIT_LIST_HEAD(&dp->dl_perfile);
716 	INIT_LIST_HEAD(&dp->dl_perclnt);
717 	INIT_LIST_HEAD(&dp->dl_recall_lru);
718 	dp->dl_clnt_odstate = odstate;
719 	get_clnt_odstate(odstate);
720 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
721 	dp->dl_retries = 1;
722 	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
723 		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
724 	return dp;
725 out_dec:
726 	atomic_long_dec(&num_delegations);
727 	return NULL;
728 }
729 
730 void
731 nfs4_put_stid(struct nfs4_stid *s)
732 {
733 	struct nfs4_file *fp = s->sc_file;
734 	struct nfs4_client *clp = s->sc_client;
735 
736 	might_lock(&clp->cl_lock);
737 
738 	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
739 		wake_up_all(&close_wq);
740 		return;
741 	}
742 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
743 	spin_unlock(&clp->cl_lock);
744 	s->sc_free(s);
745 	if (fp)
746 		put_nfs4_file(fp);
747 }
748 
749 void
750 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
751 {
752 	stateid_t *src = &stid->sc_stateid;
753 
754 	spin_lock(&stid->sc_lock);
755 	if (unlikely(++src->si_generation == 0))
756 		src->si_generation = 1;
757 	memcpy(dst, src, sizeof(*dst));
758 	spin_unlock(&stid->sc_lock);
759 }
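/*
 * Note that the generation counter deliberately skips 0 on wraparound
 * (..., 0xffffffff, 1, 2, ...), since a stateid with seqid 0 has a
 * special meaning to NFSv4.1 clients and servers.
 */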
760 
761 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
762 {
763 	struct file *filp = NULL;
764 
765 	spin_lock(&fp->fi_lock);
766 	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
767 		swap(filp, fp->fi_deleg_file);
768 	spin_unlock(&fp->fi_lock);
769 
770 	if (filp) {
771 		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
772 		fput(filp);
773 	}
774 }
775 
776 void nfs4_unhash_stid(struct nfs4_stid *s)
777 {
778 	s->sc_type = 0;
779 }
780 
781 /**
782  * nfs4_get_existing_delegation - Discover if this delegation already exists
783  * @clp:     a pointer to the nfs4_client we're granting a delegation to
784  * @fp:      a pointer to the nfs4_file we're granting a delegation on
785  *
786  * Return:
787  *      On success: 0 if an existing delegation was not found.
788  *
789  *      On error: -EAGAIN if one was previously granted to this nfs4_client
790  *                 for this nfs4_file.
791  *
792  */
793 
794 static int
795 nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
796 {
797 	struct nfs4_delegation *searchdp = NULL;
798 	struct nfs4_client *searchclp = NULL;
799 
800 	lockdep_assert_held(&state_lock);
801 	lockdep_assert_held(&fp->fi_lock);
802 
803 	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
804 		searchclp = searchdp->dl_stid.sc_client;
805 		if (clp == searchclp) {
806 			return -EAGAIN;
807 		}
808 	}
809 	return 0;
810 }
811 
812 /**
813  * hash_delegation_locked - Add a delegation to the appropriate lists
814  * @dp:     a pointer to the nfs4_delegation we are adding.
815  * @fp:     a pointer to the nfs4_file we're granting a delegation on
816  *
817  * Return:
818  *      On success: 0 if the delegation was successfully hashed.
819  *
820  *      On error: -EAGAIN if one was previously granted to this
821  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
822  *
823  */
824 
825 static int
826 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
827 {
828 	int status;
829 	struct nfs4_client *clp = dp->dl_stid.sc_client;
830 
831 	lockdep_assert_held(&state_lock);
832 	lockdep_assert_held(&fp->fi_lock);
833 
834 	status = nfs4_get_existing_delegation(clp, fp);
835 	if (status)
836 		return status;
837 	++fp->fi_delegees;
838 	atomic_inc(&dp->dl_stid.sc_count);
839 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
840 	list_add(&dp->dl_perfile, &fp->fi_delegations);
841 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
842 	return 0;
843 }
844 
845 static bool
846 unhash_delegation_locked(struct nfs4_delegation *dp)
847 {
848 	struct nfs4_file *fp = dp->dl_stid.sc_file;
849 
850 	lockdep_assert_held(&state_lock);
851 
852 	if (list_empty(&dp->dl_perfile))
853 		return false;
854 
855 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
856 	/* Ensure that deleg break won't try to requeue it */
857 	++dp->dl_time;
858 	spin_lock(&fp->fi_lock);
859 	list_del_init(&dp->dl_perclnt);
860 	list_del_init(&dp->dl_recall_lru);
861 	list_del_init(&dp->dl_perfile);
862 	spin_unlock(&fp->fi_lock);
863 	return true;
864 }
865 
866 static void destroy_delegation(struct nfs4_delegation *dp)
867 {
868 	bool unhashed;
869 
870 	spin_lock(&state_lock);
871 	unhashed = unhash_delegation_locked(dp);
872 	spin_unlock(&state_lock);
873 	if (unhashed) {
874 		put_clnt_odstate(dp->dl_clnt_odstate);
875 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
876 		nfs4_put_stid(&dp->dl_stid);
877 	}
878 }
879 
880 static void revoke_delegation(struct nfs4_delegation *dp)
881 {
882 	struct nfs4_client *clp = dp->dl_stid.sc_client;
883 
884 	WARN_ON(!list_empty(&dp->dl_recall_lru));
885 
886 	put_clnt_odstate(dp->dl_clnt_odstate);
887 	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
888 
889 	if (clp->cl_minorversion == 0)
890 		nfs4_put_stid(&dp->dl_stid);
891 	else {
892 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
893 		spin_lock(&clp->cl_lock);
894 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
895 		spin_unlock(&clp->cl_lock);
896 	}
897 }
898 
899 /*
900  * SETCLIENTID state
901  */
902 
903 static unsigned int clientid_hashval(u32 id)
904 {
905 	return id & CLIENT_HASH_MASK;
906 }
907 
908 static unsigned int clientstr_hashval(const char *name)
909 {
910 	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
911 }
912 
913 /*
914  * We store the NONE, READ, WRITE, and BOTH bits separately in the
915  * st_{access,deny}_bmap field of the stateid, in order to track not
916  * only what share bits are currently in force, but also what
917  * combinations of share bits previous opens have used.  This allows us
918  * to enforce the recommendation of rfc 3530 14.2.19 that the server
919  * return an error if the client attempts to downgrade to a combination
920  * of share bits not explicable by closing some of its previous opens.
921  *
922  * XXX: This enforcement is actually incomplete, since we don't keep
923  * track of access/deny bit combinations; so, e.g., we allow:
924  *
925  *	OPEN allow read, deny write
926  *	OPEN allow both, deny none
927  *	DOWNGRADE allow read, deny none
928  *
929  * which we should reject.
930  */
931 static unsigned int
932 bmap_to_share_mode(unsigned long bmap) {
933 	int i;
934 	unsigned int access = 0;
935 
936 	for (i = 1; i < 4; i++) {
937 		if (test_bit(i, &bmap))
938 			access |= i;
939 	}
940 	return access;
941 }
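/*
 * Example: after successive OPENs on one stateid with ACCESS_READ (1)
 * and then ACCESS_BOTH (3), st_access_bmap has bits 1 and 3 set (0x0a)
 * and bmap_to_share_mode() returns 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */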
942 
943 /* set share access for a given stateid */
944 static inline void
945 set_access(u32 access, struct nfs4_ol_stateid *stp)
946 {
947 	unsigned char mask = 1 << access;
948 
949 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
950 	stp->st_access_bmap |= mask;
951 }
952 
953 /* clear share access for a given stateid */
954 static inline void
955 clear_access(u32 access, struct nfs4_ol_stateid *stp)
956 {
957 	unsigned char mask = 1 << access;
958 
959 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
960 	stp->st_access_bmap &= ~mask;
961 }
962 
963 /* test whether a given stateid has access */
964 static inline bool
965 test_access(u32 access, struct nfs4_ol_stateid *stp)
966 {
967 	unsigned char mask = 1 << access;
968 
969 	return (bool)(stp->st_access_bmap & mask);
970 }
971 
972 /* set share deny for a given stateid */
973 static inline void
974 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
975 {
976 	unsigned char mask = 1 << deny;
977 
978 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
979 	stp->st_deny_bmap |= mask;
980 }
981 
982 /* clear share deny for a given stateid */
983 static inline void
984 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
985 {
986 	unsigned char mask = 1 << deny;
987 
988 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
989 	stp->st_deny_bmap &= ~mask;
990 }
991 
992 /* test whether a given stateid is denying specific access */
993 static inline bool
994 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
995 {
996 	unsigned char mask = 1 << deny;
997 
998 	return (bool)(stp->st_deny_bmap & mask);
999 }
1000 
1001 static int nfs4_access_to_omode(u32 access)
1002 {
1003 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
1004 	case NFS4_SHARE_ACCESS_READ:
1005 		return O_RDONLY;
1006 	case NFS4_SHARE_ACCESS_WRITE:
1007 		return O_WRONLY;
1008 	case NFS4_SHARE_ACCESS_BOTH:
1009 		return O_RDWR;
1010 	}
1011 	WARN_ON_ONCE(1);
1012 	return O_RDONLY;
1013 }
1014 
1015 /*
1016  * A stateid that had a deny mode associated with it is being released
1017  * or downgraded. Recalculate the deny mode on the file.
1018  */
1019 static void
1020 recalculate_deny_mode(struct nfs4_file *fp)
1021 {
1022 	struct nfs4_ol_stateid *stp;
1023 
1024 	spin_lock(&fp->fi_lock);
1025 	fp->fi_share_deny = 0;
1026 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1027 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1028 	spin_unlock(&fp->fi_lock);
1029 }
1030 
1031 static void
1032 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1033 {
1034 	int i;
1035 	bool change = false;
1036 
1037 	for (i = 1; i < 4; i++) {
1038 		if ((i & deny) != i) {
1039 			change = true;
1040 			clear_deny(i, stp);
1041 		}
1042 	}
1043 
1044 	/* Recalculate per-file deny mode if there was a change */
1045 	if (change)
1046 		recalculate_deny_mode(stp->st_stid.sc_file);
1047 }
1048 
1049 /* release all access and file references for a given stateid */
1050 static void
1051 release_all_access(struct nfs4_ol_stateid *stp)
1052 {
1053 	int i;
1054 	struct nfs4_file *fp = stp->st_stid.sc_file;
1055 
1056 	if (fp && stp->st_deny_bmap != 0)
1057 		recalculate_deny_mode(fp);
1058 
1059 	for (i = 1; i < 4; i++) {
1060 		if (test_access(i, stp))
1061 			nfs4_file_put_access(stp->st_stid.sc_file, i);
1062 		clear_access(i, stp);
1063 	}
1064 }
1065 
1066 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1067 {
1068 	kfree(sop->so_owner.data);
1069 	sop->so_ops->so_free(sop);
1070 }
1071 
1072 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1073 {
1074 	struct nfs4_client *clp = sop->so_client;
1075 
1076 	might_lock(&clp->cl_lock);
1077 
1078 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1079 		return;
1080 	sop->so_ops->so_unhash(sop);
1081 	spin_unlock(&clp->cl_lock);
1082 	nfs4_free_stateowner(sop);
1083 }
1084 
1085 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1086 {
1087 	struct nfs4_file *fp = stp->st_stid.sc_file;
1088 
1089 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1090 
1091 	if (list_empty(&stp->st_perfile))
1092 		return false;
1093 
1094 	spin_lock(&fp->fi_lock);
1095 	list_del_init(&stp->st_perfile);
1096 	spin_unlock(&fp->fi_lock);
1097 	list_del(&stp->st_perstateowner);
1098 	return true;
1099 }
1100 
1101 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1102 {
1103 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1104 
1105 	put_clnt_odstate(stp->st_clnt_odstate);
1106 	release_all_access(stp);
1107 	if (stp->st_stateowner)
1108 		nfs4_put_stateowner(stp->st_stateowner);
1109 	kmem_cache_free(stateid_slab, stid);
1110 }
1111 
1112 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1113 {
1114 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1115 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1116 	struct file *file;
1117 
1118 	file = find_any_file(stp->st_stid.sc_file);
1119 	if (file)
1120 		filp_close(file, (fl_owner_t)lo);
1121 	nfs4_free_ol_stateid(stid);
1122 }
1123 
1124 /*
1125  * Put the persistent reference to an already unhashed generic stateid, while
1126  * holding the cl_lock. If it's the last reference, then put it onto the
1127  * reaplist for later destruction.
1128  */
1129 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1130 				       struct list_head *reaplist)
1131 {
1132 	struct nfs4_stid *s = &stp->st_stid;
1133 	struct nfs4_client *clp = s->sc_client;
1134 
1135 	lockdep_assert_held(&clp->cl_lock);
1136 
1137 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1138 
1139 	if (!atomic_dec_and_test(&s->sc_count)) {
1140 		wake_up_all(&close_wq);
1141 		return;
1142 	}
1143 
1144 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1145 	list_add(&stp->st_locks, reaplist);
1146 }
1147 
1148 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1149 {
1150 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
1151 
1152 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
1153 
1154 	list_del_init(&stp->st_locks);
1155 	nfs4_unhash_stid(&stp->st_stid);
1156 	return unhash_ol_stateid(stp);
1157 }
1158 
1159 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1160 {
1161 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
1162 	bool unhashed;
1163 
1164 	spin_lock(&oo->oo_owner.so_client->cl_lock);
1165 	unhashed = unhash_lock_stateid(stp);
1166 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
1167 	if (unhashed)
1168 		nfs4_put_stid(&stp->st_stid);
1169 }
1170 
1171 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1172 {
1173 	struct nfs4_client *clp = lo->lo_owner.so_client;
1174 
1175 	lockdep_assert_held(&clp->cl_lock);
1176 
1177 	list_del_init(&lo->lo_owner.so_strhash);
1178 }
1179 
1180 /*
1181  * Free a list of generic stateids that were collected earlier after being
1182  * fully unhashed.
1183  */
1184 static void
1185 free_ol_stateid_reaplist(struct list_head *reaplist)
1186 {
1187 	struct nfs4_ol_stateid *stp;
1188 	struct nfs4_file *fp;
1189 
1190 	might_sleep();
1191 
1192 	while (!list_empty(reaplist)) {
1193 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1194 				       st_locks);
1195 		list_del(&stp->st_locks);
1196 		fp = stp->st_stid.sc_file;
1197 		stp->st_stid.sc_free(&stp->st_stid);
1198 		if (fp)
1199 			put_nfs4_file(fp);
1200 	}
1201 }
1202 
1203 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1204 				       struct list_head *reaplist)
1205 {
1206 	struct nfs4_ol_stateid *stp;
1207 
1208 	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1209 
1210 	while (!list_empty(&open_stp->st_locks)) {
1211 		stp = list_entry(open_stp->st_locks.next,
1212 				struct nfs4_ol_stateid, st_locks);
1213 		WARN_ON(!unhash_lock_stateid(stp));
1214 		put_ol_stateid_locked(stp, reaplist);
1215 	}
1216 }
1217 
1218 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1219 				struct list_head *reaplist)
1220 {
1221 	bool unhashed;
1222 
1223 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1224 
1225 	unhashed = unhash_ol_stateid(stp);
1226 	release_open_stateid_locks(stp, reaplist);
1227 	return unhashed;
1228 }
1229 
1230 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1231 {
1232 	LIST_HEAD(reaplist);
1233 
1234 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1235 	if (unhash_open_stateid(stp, &reaplist))
1236 		put_ol_stateid_locked(stp, &reaplist);
1237 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1238 	free_ol_stateid_reaplist(&reaplist);
1239 }
1240 
1241 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1242 {
1243 	struct nfs4_client *clp = oo->oo_owner.so_client;
1244 
1245 	lockdep_assert_held(&clp->cl_lock);
1246 
1247 	list_del_init(&oo->oo_owner.so_strhash);
1248 	list_del_init(&oo->oo_perclient);
1249 }
1250 
1251 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1252 {
1253 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1254 					  nfsd_net_id);
1255 	struct nfs4_ol_stateid *s;
1256 
1257 	spin_lock(&nn->client_lock);
1258 	s = oo->oo_last_closed_stid;
1259 	if (s) {
1260 		list_del_init(&oo->oo_close_lru);
1261 		oo->oo_last_closed_stid = NULL;
1262 	}
1263 	spin_unlock(&nn->client_lock);
1264 	if (s)
1265 		nfs4_put_stid(&s->st_stid);
1266 }
1267 
1268 static void release_openowner(struct nfs4_openowner *oo)
1269 {
1270 	struct nfs4_ol_stateid *stp;
1271 	struct nfs4_client *clp = oo->oo_owner.so_client;
1272 	struct list_head reaplist;
1273 
1274 	INIT_LIST_HEAD(&reaplist);
1275 
1276 	spin_lock(&clp->cl_lock);
1277 	unhash_openowner_locked(oo);
1278 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1279 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1280 				struct nfs4_ol_stateid, st_perstateowner);
1281 		if (unhash_open_stateid(stp, &reaplist))
1282 			put_ol_stateid_locked(stp, &reaplist);
1283 	}
1284 	spin_unlock(&clp->cl_lock);
1285 	free_ol_stateid_reaplist(&reaplist);
1286 	release_last_closed_stateid(oo);
1287 	nfs4_put_stateowner(&oo->oo_owner);
1288 }
1289 
1290 static inline int
1291 hash_sessionid(struct nfs4_sessionid *sessionid)
1292 {
1293 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1294 
1295 	return sid->sequence % SESSION_HASH_SIZE;
1296 }
1297 
1298 #ifdef CONFIG_SUNRPC_DEBUG
1299 static inline void
1300 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1301 {
1302 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1303 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1304 }
1305 #else
1306 static inline void
1307 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1308 {
1309 }
1310 #endif
1311 
1312 /*
1313  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1314  * won't be used for replay.
1315  */
1316 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1317 {
1318 	struct nfs4_stateowner *so = cstate->replay_owner;
1319 
1320 	if (nfserr == nfserr_replay_me)
1321 		return;
1322 
1323 	if (!seqid_mutating_err(ntohl(nfserr))) {
1324 		nfsd4_cstate_clear_replay(cstate);
1325 		return;
1326 	}
1327 	if (!so)
1328 		return;
1329 	if (so->so_is_open_owner)
1330 		release_last_closed_stateid(openowner(so));
1331 	so->so_seqid++;
1332 	return;
1333 }
1334 
1335 static void
1336 gen_sessionid(struct nfsd4_session *ses)
1337 {
1338 	struct nfs4_client *clp = ses->se_client;
1339 	struct nfsd4_sessionid *sid;
1340 
1341 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1342 	sid->clientid = clp->cl_clientid;
1343 	sid->sequence = current_sessionid++;
1344 	sid->reserved = 0;
1345 }
1346 
1347 /*
1348  * The protocol defines ca_maxresponsesize_cached to include the size of
1349  * the rpc header, but all we need to cache is the data starting after
1350  * the end of the initial SEQUENCE operation--the rest we regenerate
1351  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1352  * value that is the number of bytes in our cache plus a few additional
1353  * bytes.  In order to stay on the safe side, and not promise more than
1354  * we can cache, those additional bytes must be the minimum possible: 24
1355  * bytes of rpc header (xid through accept state, with AUTH_NULL
1356  * verifier), 12 for the compound header (with zero-length tag), and 44
1357  * for the SEQUENCE op response:
1358  */
1359 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1360 
1361 static void
1362 free_session_slots(struct nfsd4_session *ses)
1363 {
1364 	int i;
1365 
1366 	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
1367 		kfree(ses->se_slots[i]);
1368 }
1369 
1370 /*
1371  * We don't actually need to cache the rpc and session headers, so we
1372  * can allocate a little less for each slot:
1373  */
1374 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1375 {
1376 	u32 size;
1377 
1378 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1379 		size = 0;
1380 	else
1381 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1382 	return size + sizeof(struct nfsd4_slot);
1383 }
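/*
 * Example (illustrative numbers): a client advertising a maxresp_cached
 * of 1104 bytes gets a per-slot data cache of 1104 - NFSD_MIN_HDR_SEQ_SZ
 * (80) = 1024 bytes, so each slot costs 1024 + sizeof(struct nfsd4_slot)
 * bytes of DRC memory.
 */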
1384 
1385 /*
1386  * XXX: If we run out of reserved DRC memory we could (up to a point)
1387  * re-negotiate active sessions and reduce their slot usage to make
1388  * room for new connections. For now we just fail the create session.
1389  */
1390 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1391 {
1392 	u32 slotsize = slot_bytes(ca);
1393 	u32 num = ca->maxreqs;
1394 	int avail;
1395 
1396 	spin_lock(&nfsd_drc_lock);
1397 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1398 		    nfsd_drc_max_mem - nfsd_drc_mem_used);
1399 	num = min_t(int, num, avail / slotsize);
1400 	nfsd_drc_mem_used += num * slotsize;
1401 	spin_unlock(&nfsd_drc_lock);
1402 
1403 	return num;
1404 }
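/*
 * Example (illustrative numbers, assuming NFSD_MAX_MEM_PER_SESSION is
 * not the limiting factor): with 1024-byte slots, a request for 32 slots
 * and only 20480 bytes of DRC memory left, the session is trimmed to
 * min(32, 20480 / 1024) = 20 slots and 20480 bytes are charged to
 * nfsd_drc_mem_used.
 */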
1405 
1406 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1407 {
1408 	int slotsize = slot_bytes(ca);
1409 
1410 	spin_lock(&nfsd_drc_lock);
1411 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1412 	spin_unlock(&nfsd_drc_lock);
1413 }
1414 
1415 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1416 					   struct nfsd4_channel_attrs *battrs)
1417 {
1418 	int numslots = fattrs->maxreqs;
1419 	int slotsize = slot_bytes(fattrs);
1420 	struct nfsd4_session *new;
1421 	int mem, i;
1422 
1423 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1424 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1425 	mem = numslots * sizeof(struct nfsd4_slot *);
1426 
1427 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1428 	if (!new)
1429 		return NULL;
1430 	/* allocate each struct nfsd4_slot and data cache in one piece */
1431 	for (i = 0; i < numslots; i++) {
1432 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1433 		if (!new->se_slots[i])
1434 			goto out_free;
1435 	}
1436 
1437 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1438 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1439 
1440 	return new;
1441 out_free:
1442 	while (i--)
1443 		kfree(new->se_slots[i]);
1444 	kfree(new);
1445 	return NULL;
1446 }
1447 
1448 static void free_conn(struct nfsd4_conn *c)
1449 {
1450 	svc_xprt_put(c->cn_xprt);
1451 	kfree(c);
1452 }
1453 
1454 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1455 {
1456 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1457 	struct nfs4_client *clp = c->cn_session->se_client;
1458 
1459 	spin_lock(&clp->cl_lock);
1460 	if (!list_empty(&c->cn_persession)) {
1461 		list_del(&c->cn_persession);
1462 		free_conn(c);
1463 	}
1464 	nfsd4_probe_callback(clp);
1465 	spin_unlock(&clp->cl_lock);
1466 }
1467 
1468 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1469 {
1470 	struct nfsd4_conn *conn;
1471 
1472 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1473 	if (!conn)
1474 		return NULL;
1475 	svc_xprt_get(rqstp->rq_xprt);
1476 	conn->cn_xprt = rqstp->rq_xprt;
1477 	conn->cn_flags = flags;
1478 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1479 	return conn;
1480 }
1481 
1482 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1483 {
1484 	conn->cn_session = ses;
1485 	list_add(&conn->cn_persession, &ses->se_conns);
1486 }
1487 
1488 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1489 {
1490 	struct nfs4_client *clp = ses->se_client;
1491 
1492 	spin_lock(&clp->cl_lock);
1493 	__nfsd4_hash_conn(conn, ses);
1494 	spin_unlock(&clp->cl_lock);
1495 }
1496 
1497 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1498 {
1499 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1500 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1501 }
1502 
1503 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1504 {
1505 	int ret;
1506 
1507 	nfsd4_hash_conn(conn, ses);
1508 	ret = nfsd4_register_conn(conn);
1509 	if (ret)
1510 		/* oops; xprt is already down: */
1511 		nfsd4_conn_lost(&conn->cn_xpt_user);
1512 	/* We may have gained or lost a callback channel: */
1513 	nfsd4_probe_callback_sync(ses->se_client);
1514 }
1515 
1516 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1517 {
1518 	u32 dir = NFS4_CDFC4_FORE;
1519 
1520 	if (cses->flags & SESSION4_BACK_CHAN)
1521 		dir |= NFS4_CDFC4_BACK;
1522 	return alloc_conn(rqstp, dir);
1523 }
1524 
1525 /* must be called under client_lock */
1526 static void nfsd4_del_conns(struct nfsd4_session *s)
1527 {
1528 	struct nfs4_client *clp = s->se_client;
1529 	struct nfsd4_conn *c;
1530 
1531 	spin_lock(&clp->cl_lock);
1532 	while (!list_empty(&s->se_conns)) {
1533 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1534 		list_del_init(&c->cn_persession);
1535 		spin_unlock(&clp->cl_lock);
1536 
1537 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1538 		free_conn(c);
1539 
1540 		spin_lock(&clp->cl_lock);
1541 	}
1542 	spin_unlock(&clp->cl_lock);
1543 }
1544 
1545 static void __free_session(struct nfsd4_session *ses)
1546 {
1547 	free_session_slots(ses);
1548 	kfree(ses);
1549 }
1550 
1551 static void free_session(struct nfsd4_session *ses)
1552 {
1553 	nfsd4_del_conns(ses);
1554 	nfsd4_put_drc_mem(&ses->se_fchannel);
1555 	__free_session(ses);
1556 }
1557 
1558 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1559 {
1560 	int idx;
1561 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1562 
1563 	new->se_client = clp;
1564 	gen_sessionid(new);
1565 
1566 	INIT_LIST_HEAD(&new->se_conns);
1567 
1568 	new->se_cb_seq_nr = 1;
1569 	new->se_flags = cses->flags;
1570 	new->se_cb_prog = cses->callback_prog;
1571 	new->se_cb_sec = cses->cb_sec;
1572 	atomic_set(&new->se_ref, 0);
1573 	idx = hash_sessionid(&new->se_sessionid);
1574 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1575 	spin_lock(&clp->cl_lock);
1576 	list_add(&new->se_perclnt, &clp->cl_sessions);
1577 	spin_unlock(&clp->cl_lock);
1578 
1579 	{
1580 		struct sockaddr *sa = svc_addr(rqstp);
1581 		/*
1582 		 * This is a little silly; with sessions there's no real
1583 		 * use for the callback address.  Use the peer address
1584 		 * as a reasonable default for now, but consider fixing
1585 		 * the rpc client not to require an address in the
1586 		 * future:
1587 		 */
1588 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1589 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1590 	}
1591 }
1592 
1593 /* caller must hold client_lock */
1594 static struct nfsd4_session *
1595 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1596 {
1597 	struct nfsd4_session *elem;
1598 	int idx;
1599 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1600 
1601 	lockdep_assert_held(&nn->client_lock);
1602 
1603 	dump_sessionid(__func__, sessionid);
1604 	idx = hash_sessionid(sessionid);
1605 	/* Search in the appropriate list */
1606 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1607 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1608 			    NFS4_MAX_SESSIONID_LEN)) {
1609 			return elem;
1610 		}
1611 	}
1612 
1613 	dprintk("%s: session not found\n", __func__);
1614 	return NULL;
1615 }
1616 
1617 static struct nfsd4_session *
1618 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1619 		__be32 *ret)
1620 {
1621 	struct nfsd4_session *session;
1622 	__be32 status = nfserr_badsession;
1623 
1624 	session = __find_in_sessionid_hashtbl(sessionid, net);
1625 	if (!session)
1626 		goto out;
1627 	status = nfsd4_get_session_locked(session);
1628 	if (status)
1629 		session = NULL;
1630 out:
1631 	*ret = status;
1632 	return session;
1633 }
1634 
1635 /* caller must hold client_lock */
1636 static void
1637 unhash_session(struct nfsd4_session *ses)
1638 {
1639 	struct nfs4_client *clp = ses->se_client;
1640 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1641 
1642 	lockdep_assert_held(&nn->client_lock);
1643 
1644 	list_del(&ses->se_hash);
1645 	spin_lock(&ses->se_client->cl_lock);
1646 	list_del(&ses->se_perclnt);
1647 	spin_unlock(&ses->se_client->cl_lock);
1648 }
1649 
1650 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1651 static int
1652 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1653 {
1654 	/*
1655 	 * We're assuming the clid was not given out from a boot
1656 	 * precisely 2^32 (about 136 years) before this one.  That seems
1657 	 * a safe assumption:
1658 	 */
1659 	if (clid->cl_boot == (u32)nn->boot_time)
1660 		return 0;
1661 	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1662 		clid->cl_boot, clid->cl_id, nn->boot_time);
1663 	return 1;
1664 }
1665 
1666 /*
1667  * XXX Should we use a slab cache ?
1668  * This type of memory management is somewhat inefficient, but we use it
1669  * anyway since SETCLIENTID is not a common operation.
1670  */
1671 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1672 {
1673 	struct nfs4_client *clp;
1674 	int i;
1675 
1676 	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1677 	if (clp == NULL)
1678 		return NULL;
1679 	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1680 	if (clp->cl_name.data == NULL)
1681 		goto err_no_name;
1682 	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1683 			OWNER_HASH_SIZE, GFP_KERNEL);
1684 	if (!clp->cl_ownerstr_hashtbl)
1685 		goto err_no_hashtbl;
1686 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1687 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1688 	clp->cl_name.len = name.len;
1689 	INIT_LIST_HEAD(&clp->cl_sessions);
1690 	idr_init(&clp->cl_stateids);
1691 	atomic_set(&clp->cl_refcount, 0);
1692 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1693 	INIT_LIST_HEAD(&clp->cl_idhash);
1694 	INIT_LIST_HEAD(&clp->cl_openowners);
1695 	INIT_LIST_HEAD(&clp->cl_delegations);
1696 	INIT_LIST_HEAD(&clp->cl_lru);
1697 	INIT_LIST_HEAD(&clp->cl_revoked);
1698 #ifdef CONFIG_NFSD_PNFS
1699 	INIT_LIST_HEAD(&clp->cl_lo_states);
1700 #endif
1701 	spin_lock_init(&clp->cl_lock);
1702 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1703 	return clp;
1704 err_no_hashtbl:
1705 	kfree(clp->cl_name.data);
1706 err_no_name:
1707 	kfree(clp);
1708 	return NULL;
1709 }
1710 
1711 static void
1712 free_client(struct nfs4_client *clp)
1713 {
1714 	while (!list_empty(&clp->cl_sessions)) {
1715 		struct nfsd4_session *ses;
1716 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1717 				se_perclnt);
1718 		list_del(&ses->se_perclnt);
1719 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
1720 		free_session(ses);
1721 	}
1722 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1723 	free_svc_cred(&clp->cl_cred);
1724 	kfree(clp->cl_ownerstr_hashtbl);
1725 	kfree(clp->cl_name.data);
1726 	idr_destroy(&clp->cl_stateids);
1727 	kfree(clp);
1728 }
1729 
1730 /* must be called under the client_lock */
1731 static void
1732 unhash_client_locked(struct nfs4_client *clp)
1733 {
1734 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1735 	struct nfsd4_session *ses;
1736 
1737 	lockdep_assert_held(&nn->client_lock);
1738 
1739 	/* Mark the client as expired! */
1740 	clp->cl_time = 0;
1741 	/* Make it invisible */
1742 	if (!list_empty(&clp->cl_idhash)) {
1743 		list_del_init(&clp->cl_idhash);
1744 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1745 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1746 		else
1747 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1748 	}
1749 	list_del_init(&clp->cl_lru);
1750 	spin_lock(&clp->cl_lock);
1751 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1752 		list_del_init(&ses->se_hash);
1753 	spin_unlock(&clp->cl_lock);
1754 }
1755 
1756 static void
1757 unhash_client(struct nfs4_client *clp)
1758 {
1759 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1760 
1761 	spin_lock(&nn->client_lock);
1762 	unhash_client_locked(clp);
1763 	spin_unlock(&nn->client_lock);
1764 }
1765 
1766 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1767 {
1768 	if (atomic_read(&clp->cl_refcount))
1769 		return nfserr_jukebox;
1770 	unhash_client_locked(clp);
1771 	return nfs_ok;
1772 }
1773 
1774 static void
1775 __destroy_client(struct nfs4_client *clp)
1776 {
1777 	struct nfs4_openowner *oo;
1778 	struct nfs4_delegation *dp;
1779 	struct list_head reaplist;
1780 
1781 	INIT_LIST_HEAD(&reaplist);
1782 	spin_lock(&state_lock);
1783 	while (!list_empty(&clp->cl_delegations)) {
1784 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1785 		WARN_ON(!unhash_delegation_locked(dp));
1786 		list_add(&dp->dl_recall_lru, &reaplist);
1787 	}
1788 	spin_unlock(&state_lock);
1789 	while (!list_empty(&reaplist)) {
1790 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1791 		list_del_init(&dp->dl_recall_lru);
1792 		put_clnt_odstate(dp->dl_clnt_odstate);
1793 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1794 		nfs4_put_stid(&dp->dl_stid);
1795 	}
1796 	while (!list_empty(&clp->cl_revoked)) {
1797 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1798 		list_del_init(&dp->dl_recall_lru);
1799 		nfs4_put_stid(&dp->dl_stid);
1800 	}
1801 	while (!list_empty(&clp->cl_openowners)) {
1802 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1803 		nfs4_get_stateowner(&oo->oo_owner);
1804 		release_openowner(oo);
1805 	}
1806 	nfsd4_return_all_client_layouts(clp);
1807 	nfsd4_shutdown_callback(clp);
1808 	if (clp->cl_cb_conn.cb_xprt)
1809 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1810 	free_client(clp);
1811 }
1812 
1813 static void
1814 destroy_client(struct nfs4_client *clp)
1815 {
1816 	unhash_client(clp);
1817 	__destroy_client(clp);
1818 }
1819 
1820 static void expire_client(struct nfs4_client *clp)
1821 {
1822 	unhash_client(clp);
1823 	nfsd4_client_record_remove(clp);
1824 	__destroy_client(clp);
1825 }
1826 
1827 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1828 {
1829 	memcpy(target->cl_verifier.data, source->data,
1830 			sizeof(target->cl_verifier.data));
1831 }
1832 
1833 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1834 {
1835 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1836 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1837 }
1838 
1839 int strdup_if_nonnull(char **target, char *source)
1840 {
1841 	if (source) {
1842 		*target = kstrdup(source, GFP_KERNEL);
1843 		if (!*target)
1844 			return -ENOMEM;
1845 	} else
1846 		*target = NULL;
1847 	return 0;
1848 }
1849 
1850 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1851 {
1852 	int ret;
1853 
1854 	ret = strdup_if_nonnull(&target->cr_principal, source->cr_principal);
1855 	if (ret)
1856 		return ret;
1857 	ret = strdup_if_nonnull(&target->cr_raw_principal,
1858 					source->cr_raw_principal);
1859 	if (ret)
1860 		return ret;
1861 	target->cr_flavor = source->cr_flavor;
1862 	target->cr_uid = source->cr_uid;
1863 	target->cr_gid = source->cr_gid;
1864 	target->cr_group_info = source->cr_group_info;
1865 	get_group_info(target->cr_group_info);
1866 	target->cr_gss_mech = source->cr_gss_mech;
1867 	if (source->cr_gss_mech)
1868 		gss_mech_get(source->cr_gss_mech);
1869 	return 0;
1870 }
1871 
1872 static int
1873 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1874 {
1875 	if (o1->len < o2->len)
1876 		return -1;
1877 	if (o1->len > o2->len)
1878 		return 1;
1879 	return memcmp(o1->data, o2->data, o1->len);
1880 }
1881 
1882 static int same_name(const char *n1, const char *n2)
1883 {
1884 	return 0 == memcmp(n1, n2, HEXDIR_LEN);
1885 }
1886 
1887 static int
1888 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1889 {
1890 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1891 }
1892 
1893 static int
1894 same_clid(clientid_t *cl1, clientid_t *cl2)
1895 {
1896 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1897 }
1898 
1899 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1900 {
1901 	int i;
1902 
1903 	if (g1->ngroups != g2->ngroups)
1904 		return false;
1905 	for (i=0; i<g1->ngroups; i++)
1906 		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1907 			return false;
1908 	return true;
1909 }
1910 
1911 /*
1912  * RFC 3530 language requires clid_inuse be returned when the
1913  * "principal" associated with a requests differs from that previously
1914  * used.  We use uid, gid's, and gss principal string as our best
1915  * approximation.  We also don't want to allow non-gss use of a client
1916  * established using gss: in theory cr_principal should catch that
1917  * change, but in practice cr_principal can be null even in the gss case
1918  * since gssd doesn't always pass down a principal string.
1919  */
1920 static bool is_gss_cred(struct svc_cred *cr)
1921 {
1922 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
1923 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1924 }
1925 
1926 
1927 static bool
1928 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1929 {
1930 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1931 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1932 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1933 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1934 		return false;
1935 	if (cr1->cr_principal == cr2->cr_principal)
1936 		return true;
1937 	if (!cr1->cr_principal || !cr2->cr_principal)
1938 		return false;
1939 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1940 }
1941 
1942 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1943 {
1944 	struct svc_cred *cr = &rqstp->rq_cred;
1945 	u32 service;
1946 
1947 	if (!cr->cr_gss_mech)
1948 		return false;
1949 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1950 	return service == RPC_GSS_SVC_INTEGRITY ||
1951 	       service == RPC_GSS_SVC_PRIVACY;
1952 }
1953 
1954 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1955 {
1956 	struct svc_cred *cr = &rqstp->rq_cred;
1957 
1958 	if (!cl->cl_mach_cred)
1959 		return true;
1960 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
1961 		return false;
1962 	if (!svc_rqst_integrity_protected(rqstp))
1963 		return false;
1964 	if (cl->cl_cred.cr_raw_principal)
1965 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
1966 						cr->cr_raw_principal);
1967 	if (!cr->cr_principal)
1968 		return false;
1969 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1970 }
1971 
1972 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
1973 {
1974 	__be32 verf[2];
1975 
1976 	/*
1977 	 * This is opaque to client, so no need to byte-swap. Use
1978 	 * __force to keep sparse happy
1979 	 */
1980 	verf[0] = (__force __be32)get_seconds();
1981 	verf[1] = (__force __be32)nn->clverifier_counter++;
1982 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1983 }
1984 
1985 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1986 {
1987 	clp->cl_clientid.cl_boot = nn->boot_time;
1988 	clp->cl_clientid.cl_id = nn->clientid_counter++;
1989 	gen_confirm(clp, nn);
1990 }
1991 
1992 static struct nfs4_stid *
1993 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1994 {
1995 	struct nfs4_stid *ret;
1996 
1997 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1998 	if (!ret || !ret->sc_type)
1999 		return NULL;
2000 	return ret;
2001 }
2002 
2003 static struct nfs4_stid *
2004 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2005 {
2006 	struct nfs4_stid *s;
2007 
2008 	spin_lock(&cl->cl_lock);
2009 	s = find_stateid_locked(cl, t);
2010 	if (s != NULL) {
2011 		if (typemask & s->sc_type)
2012 			atomic_inc(&s->sc_count);
2013 		else
2014 			s = NULL;
2015 	}
2016 	spin_unlock(&cl->cl_lock);
2017 	return s;
2018 }
2019 
2020 static struct nfs4_client *create_client(struct xdr_netobj name,
2021 		struct svc_rqst *rqstp, nfs4_verifier *verf)
2022 {
2023 	struct nfs4_client *clp;
2024 	struct sockaddr *sa = svc_addr(rqstp);
2025 	int ret;
2026 	struct net *net = SVC_NET(rqstp);
2027 
2028 	clp = alloc_client(name);
2029 	if (clp == NULL)
2030 		return NULL;
2031 
2032 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2033 	if (ret) {
2034 		free_client(clp);
2035 		return NULL;
2036 	}
2037 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2038 	clp->cl_time = get_seconds();
2039 	clear_bit(0, &clp->cl_cb_slot_busy);
2040 	copy_verf(clp, verf);
2041 	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2042 	clp->cl_cb_session = NULL;
2043 	clp->net = net;
2044 	return clp;
2045 }
2046 
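/*
 * Standard rbtree insertion keyed by compare_blob() on the client name.
 * Callers look the name up under nn->client_lock before inserting, so
 * duplicates are not expected; an equal key would simply go to the right.
 */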
2047 static void
2048 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2049 {
2050 	struct rb_node **new = &(root->rb_node), *parent = NULL;
2051 	struct nfs4_client *clp;
2052 
2053 	while (*new) {
2054 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2055 		parent = *new;
2056 
2057 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2058 			new = &((*new)->rb_left);
2059 		else
2060 			new = &((*new)->rb_right);
2061 	}
2062 
2063 	rb_link_node(&new_clp->cl_namenode, parent, new);
2064 	rb_insert_color(&new_clp->cl_namenode, root);
2065 }
2066 
2067 static struct nfs4_client *
2068 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2069 {
2070 	int cmp;
2071 	struct rb_node *node = root->rb_node;
2072 	struct nfs4_client *clp;
2073 
2074 	while (node) {
2075 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2076 		cmp = compare_blob(&clp->cl_name, name);
2077 		if (cmp > 0)
2078 			node = node->rb_left;
2079 		else if (cmp < 0)
2080 			node = node->rb_right;
2081 		else
2082 			return clp;
2083 	}
2084 	return NULL;
2085 }
2086 
2087 static void
2088 add_to_unconfirmed(struct nfs4_client *clp)
2089 {
2090 	unsigned int idhashval;
2091 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2092 
2093 	lockdep_assert_held(&nn->client_lock);
2094 
2095 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2096 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2097 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2098 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2099 	renew_client_locked(clp);
2100 }
2101 
2102 static void
2103 move_to_confirmed(struct nfs4_client *clp)
2104 {
2105 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2106 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2107 
2108 	lockdep_assert_held(&nn->client_lock);
2109 
2110 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2111 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2112 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2113 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
2114 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2115 	renew_client_locked(clp);
2116 }
2117 
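/*
 * Look up a client by clientid in one of the id hash tables.  The
 * "sessions" flag must match the client's minorversion, so a clientid
 * established via EXCHANGE_ID can't be used with v4.0 operations and
 * vice versa.  A successful lookup also renews the client's lease.
 */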
2118 static struct nfs4_client *
2119 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2120 {
2121 	struct nfs4_client *clp;
2122 	unsigned int idhashval = clientid_hashval(clid->cl_id);
2123 
2124 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2125 		if (same_clid(&clp->cl_clientid, clid)) {
2126 			if ((bool)clp->cl_minorversion != sessions)
2127 				return NULL;
2128 			renew_client_locked(clp);
2129 			return clp;
2130 		}
2131 	}
2132 	return NULL;
2133 }
2134 
2135 static struct nfs4_client *
2136 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2137 {
2138 	struct list_head *tbl = nn->conf_id_hashtbl;
2139 
2140 	lockdep_assert_held(&nn->client_lock);
2141 	return find_client_in_id_table(tbl, clid, sessions);
2142 }
2143 
2144 static struct nfs4_client *
2145 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2146 {
2147 	struct list_head *tbl = nn->unconf_id_hashtbl;
2148 
2149 	lockdep_assert_held(&nn->client_lock);
2150 	return find_client_in_id_table(tbl, clid, sessions);
2151 }
2152 
2153 static bool clp_used_exchangeid(struct nfs4_client *clp)
2154 {
2155 	return clp->cl_exchange_flags != 0;
2156 }
2157 
2158 static struct nfs4_client *
2159 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2160 {
2161 	lockdep_assert_held(&nn->client_lock);
2162 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
2163 }
2164 
2165 static struct nfs4_client *
2166 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2167 {
2168 	lockdep_assert_held(&nn->client_lock);
2169 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2170 }
2171 
2172 static void
2173 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2174 {
2175 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2176 	struct sockaddr	*sa = svc_addr(rqstp);
2177 	u32 scopeid = rpc_get_scope_id(sa);
2178 	unsigned short expected_family;
2179 
2180 	/* Currently, we only support tcp and tcp6 for the callback channel */
2181 	if (se->se_callback_netid_len == 3 &&
2182 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2183 		expected_family = AF_INET;
2184 	else if (se->se_callback_netid_len == 4 &&
2185 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2186 		expected_family = AF_INET6;
2187 	else
2188 		goto out_err;
2189 
2190 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2191 					    se->se_callback_addr_len,
2192 					    (struct sockaddr *)&conn->cb_addr,
2193 					    sizeof(conn->cb_addr));
2194 
2195 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2196 		goto out_err;
2197 
2198 	if (conn->cb_addr.ss_family == AF_INET6)
2199 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2200 
2201 	conn->cb_prog = se->se_callback_prog;
2202 	conn->cb_ident = se->se_callback_ident;
2203 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2204 	return;
2205 out_err:
2206 	conn->cb_addr.ss_family = AF_UNSPEC;
2207 	conn->cb_addrlen = 0;
2208 	dprintk("NFSD: this client (clientid %08x/%08x) "
2209 		"will not receive delegations\n",
2210 		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2211 
2212 	return;
2213 }
2214 
2215 /*
2216  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2217  */
2218 static void
2219 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2220 {
2221 	struct xdr_buf *buf = resp->xdr.buf;
2222 	struct nfsd4_slot *slot = resp->cstate.slot;
2223 	unsigned int base;
2224 
2225 	dprintk("--> %s slot %p\n", __func__, slot);
2226 
2227 	slot->sl_opcnt = resp->opcnt;
2228 	slot->sl_status = resp->cstate.status;
2229 
2230 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2231 	if (nfsd4_not_cached(resp)) {
2232 		slot->sl_datalen = 0;
2233 		return;
2234 	}
2235 	base = resp->cstate.data_offset;
2236 	slot->sl_datalen = buf->len - base;
2237 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2238 		WARN(1, "%s: sessions DRC could not cache compound\n",
2239 		     __func__);
2240 	return;
2241 }
2242 
2243 /*
2244  * Encode the replay sequence operation from the slot values.
2245  * If cachethis is FALSE, encode the retry_uncached_rep error on the next
2246  * operation, which sets resp->p and increments resp->opcnt for
2247  * nfs4svc_encode_compoundres.
2248  *
2249  */
2250 static __be32
2251 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2252 			  struct nfsd4_compoundres *resp)
2253 {
2254 	struct nfsd4_op *op;
2255 	struct nfsd4_slot *slot = resp->cstate.slot;
2256 
2257 	/* Encode the replayed sequence operation */
2258 	op = &args->ops[resp->opcnt - 1];
2259 	nfsd4_encode_operation(resp, op);
2260 
2261 	/* Return nfserr_retry_uncached_rep in next operation. */
2262 	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
2263 		op = &args->ops[resp->opcnt++];
2264 		op->status = nfserr_retry_uncached_rep;
2265 		nfsd4_encode_operation(resp, op);
2266 	}
2267 	return op->status;
2268 }
2269 
2270 /*
2271  * The sequence operation is not cached because we can use the slot and
2272  * session values.
2273  */
2274 static __be32
2275 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2276 			 struct nfsd4_sequence *seq)
2277 {
2278 	struct nfsd4_slot *slot = resp->cstate.slot;
2279 	struct xdr_stream *xdr = &resp->xdr;
2280 	__be32 *p;
2281 	__be32 status;
2282 
2283 	dprintk("--> %s slot %p\n", __func__, slot);
2284 
2285 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2286 	if (status)
2287 		return status;
2288 
2289 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2290 	if (!p) {
2291 		WARN_ON_ONCE(1);
2292 		return nfserr_serverfault;
2293 	}
2294 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2295 	xdr_commit_encode(xdr);
2296 
2297 	resp->opcnt = slot->sl_opcnt;
2298 	return slot->sl_status;
2299 }
2300 
2301 /*
2302  * Set the exchange_id flags returned by the server.
2303  */
2304 static void
2305 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2306 {
2307 #ifdef CONFIG_NFSD_PNFS
2308 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2309 #else
2310 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2311 #endif
2312 
2313 	/* Referrals are supported, Migration is not. */
2314 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2315 
2316 	/* set the wire flags to return to client. */
2317 	clid->flags = new->cl_exchange_flags;
2318 }
2319 
2320 static bool client_has_openowners(struct nfs4_client *clp)
2321 {
2322 	struct nfs4_openowner *oo;
2323 
2324 	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2325 		if (!list_empty(&oo->oo_owner.so_stateids))
2326 			return true;
2327 	}
2328 	return false;
2329 }
2330 
2331 static bool client_has_state(struct nfs4_client *clp)
2332 {
2333 	return client_has_openowners(clp)
2334 #ifdef CONFIG_NFSD_PNFS
2335 		|| !list_empty(&clp->cl_lo_states)
2336 #endif
2337 		|| !list_empty(&clp->cl_delegations)
2338 		|| !list_empty(&clp->cl_sessions);
2339 }
2340 
2341 __be32
2342 nfsd4_exchange_id(struct svc_rqst *rqstp,
2343 		  struct nfsd4_compound_state *cstate,
2344 		  struct nfsd4_exchange_id *exid)
2345 {
2346 	struct nfs4_client *conf, *new;
2347 	struct nfs4_client *unconf = NULL;
2348 	__be32 status;
2349 	char			addr_str[INET6_ADDRSTRLEN];
2350 	nfs4_verifier		verf = exid->verifier;
2351 	struct sockaddr		*sa = svc_addr(rqstp);
2352 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2353 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2354 
2355 	rpc_ntop(sa, addr_str, sizeof(addr_str));
2356 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2357 		"ip_addr=%s flags %x, spa_how %d\n",
2358 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
2359 		addr_str, exid->flags, exid->spa_how);
2360 
2361 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2362 		return nfserr_inval;
2363 
2364 	new = create_client(exid->clname, rqstp, &verf);
2365 	if (new == NULL)
2366 		return nfserr_jukebox;
2367 
2368 	switch (exid->spa_how) {
2369 	case SP4_MACH_CRED:
2370 		exid->spo_must_enforce[0] = 0;
2371 		exid->spo_must_enforce[1] = (
2372 			1 << (OP_BIND_CONN_TO_SESSION - 32) |
2373 			1 << (OP_EXCHANGE_ID - 32) |
2374 			1 << (OP_CREATE_SESSION - 32) |
2375 			1 << (OP_DESTROY_SESSION - 32) |
2376 			1 << (OP_DESTROY_CLIENTID - 32));
2377 
2378 		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2379 					1 << (OP_OPEN_DOWNGRADE) |
2380 					1 << (OP_LOCKU) |
2381 					1 << (OP_DELEGRETURN));
2382 
2383 		exid->spo_must_allow[1] &= (
2384 					1 << (OP_TEST_STATEID - 32) |
2385 					1 << (OP_FREE_STATEID - 32));
2386 		if (!svc_rqst_integrity_protected(rqstp)) {
2387 			status = nfserr_inval;
2388 			goto out_nolock;
2389 		}
2390 		/*
2391 		 * Sometimes userspace doesn't give us a principal.
2392 		 * Which is a bug, really.  Anyway, we can't enforce
2393 		 * MACH_CRED in that case, better to give up now:
2394 		 */
2395 		if (!new->cl_cred.cr_principal &&
2396 					!new->cl_cred.cr_raw_principal) {
2397 			status = nfserr_serverfault;
2398 			goto out_nolock;
2399 		}
2400 		new->cl_mach_cred = true;
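		/* fall through */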
2401 	case SP4_NONE:
2402 		break;
2403 	default:				/* checked by xdr code */
2404 		WARN_ON_ONCE(1);
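		/* fall through */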
2405 	case SP4_SSV:
2406 		status = nfserr_encr_alg_unsupp;
2407 		goto out_nolock;
2408 	}
2409 
2410 	/* Cases below refer to rfc 5661 section 18.35.4: */
2411 	spin_lock(&nn->client_lock);
2412 	conf = find_confirmed_client_by_name(&exid->clname, nn);
2413 	if (conf) {
2414 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2415 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2416 
2417 		if (update) {
2418 			if (!clp_used_exchangeid(conf)) { /* buggy client */
2419 				status = nfserr_inval;
2420 				goto out;
2421 			}
2422 			if (!nfsd4_mach_creds_match(conf, rqstp)) {
2423 				status = nfserr_wrong_cred;
2424 				goto out;
2425 			}
2426 			if (!creds_match) { /* case 9 */
2427 				status = nfserr_perm;
2428 				goto out;
2429 			}
2430 			if (!verfs_match) { /* case 8 */
2431 				status = nfserr_not_same;
2432 				goto out;
2433 			}
2434 			/* case 6 */
2435 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2436 			goto out_copy;
2437 		}
2438 		if (!creds_match) { /* case 3 */
2439 			if (client_has_state(conf)) {
2440 				status = nfserr_clid_inuse;
2441 				goto out;
2442 			}
2443 			goto out_new;
2444 		}
2445 		if (verfs_match) { /* case 2 */
2446 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2447 			goto out_copy;
2448 		}
2449 		/* case 5, client reboot */
2450 		conf = NULL;
2451 		goto out_new;
2452 	}
2453 
2454 	if (update) { /* case 7 */
2455 		status = nfserr_noent;
2456 		goto out;
2457 	}
2458 
2459 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
2460 	if (unconf) /* case 4, possible retry or client restart */
2461 		unhash_client_locked(unconf);
2462 
2463 	/* case 1 (normal case) */
2464 out_new:
2465 	if (conf) {
2466 		status = mark_client_expired_locked(conf);
2467 		if (status)
2468 			goto out;
2469 	}
2470 	new->cl_minorversion = cstate->minorversion;
2471 	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2472 	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2473 
2474 	gen_clid(new, nn);
2475 	add_to_unconfirmed(new);
2476 	swap(new, conf);
2477 out_copy:
2478 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2479 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
2480 
2481 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2482 	nfsd4_set_ex_flags(conf, exid);
2483 
2484 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2485 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2486 	status = nfs_ok;
2487 
2488 out:
2489 	spin_unlock(&nn->client_lock);
2490 out_nolock:
2491 	if (new)
2492 		expire_client(new);
2493 	if (unconf)
2494 		expire_client(unconf);
2495 	return status;
2496 }
2497 
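/*
 * Compare an incoming sequence id against the last one recorded in the
 * slot:
 *
 *   slot in use,  seqid == slot_seqid      -> jukebox (client should retry)
 *   slot in use,  anything else            -> seq_misordered
 *   slot idle,    seqid == slot_seqid + 1  -> nfs_ok (new request)
 *   slot idle,    seqid == slot_seqid      -> replay_cache (resend cached reply)
 *   slot idle,    anything else            -> seq_misordered
 *
 * The "+ 1" comparison is unsigned 32-bit, so wraparound from 0xffffffff
 * back to 0 is handled naturally.
 */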
2498 static __be32
2499 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2500 {
2501 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2502 		slot_seqid);
2503 
2504 	/* The slot is in use, and no response has been sent. */
2505 	if (slot_inuse) {
2506 		if (seqid == slot_seqid)
2507 			return nfserr_jukebox;
2508 		else
2509 			return nfserr_seq_misordered;
2510 	}
2511 	/* Note unsigned 32-bit arithmetic handles wraparound: */
2512 	if (likely(seqid == slot_seqid + 1))
2513 		return nfs_ok;
2514 	if (seqid == slot_seqid)
2515 		return nfserr_replay_cache;
2516 	return nfserr_seq_misordered;
2517 }
2518 
2519 /*
2520  * Cache the create session result into the create session single DRC
2521  * slot cache by saving the decoded create_session result. sl_seqid has been set.
2522  * Do this for solo or embedded create session operations.
2523  */
2524 static void
2525 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2526 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
2527 {
2528 	slot->sl_status = nfserr;
2529 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2530 }
2531 
2532 static __be32
2533 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2534 			    struct nfsd4_clid_slot *slot)
2535 {
2536 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2537 	return slot->sl_status;
2538 }
2539 
2540 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
2541 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2542 			1 +	/* MIN tag is length with zero, only length */ \
2543 			3 +	/* version, opcount, opcode */ \
2544 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2545 				/* seqid, slotID, slotID, cache */ \
2546 			4 ) * sizeof(__be32))
2547 
2548 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2549 			2 +	/* verifier: AUTH_NULL, length 0 */\
2550 			1 +	/* status */ \
2551 			1 +	/* MIN tag is length with zero, only length */ \
2552 			3 +	/* opcount, opcode, opstatus*/ \
2553 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2554 				/* seqid, slotID, slotID, slotID, status */ \
2555 			5 ) * sizeof(__be32))
2556 
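/*
 * Clamp the client's requested fore channel attributes to what this server
 * supports: request/reply sizes are limited to the RPC transport maximum,
 * maxops and maxreqs to the compile-time limits, and maxresp_cached to the
 * per-slot cache size.  Finally reserve DRC memory for the slots; failure
 * to get any is reported as jukebox.
 */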
2557 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2558 {
2559 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2560 
2561 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2562 		return nfserr_toosmall;
2563 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2564 		return nfserr_toosmall;
2565 	ca->headerpadsz = 0;
2566 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2567 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2568 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2569 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2570 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2571 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2572 	/*
2573 	 * Note that decreasing the slot size below the client's request may
2574 	 * make it difficult for the client to function correctly, whereas
2575 	 * decreasing the number of slots will (just?) affect
2576 	 * performance.  When short on memory we therefore prefer to
2577 	 * decrease the number of slots rather than their size.  Clients that
2578 	 * request larger slots than they need will get poor results:
2579 	 */
2580 	ca->maxreqs = nfsd4_get_drc_mem(ca);
2581 	if (!ca->maxreqs)
2582 		return nfserr_jukebox;
2583 
2584 	return nfs_ok;
2585 }
2586 
2587 /*
2588  * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2589  * These are based on similar macros in linux/sunrpc/msg_prot.h .
2590  */
2591 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
2592 	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2593 
2594 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2595 	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2596 
2597 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
2598 				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2599 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
2600 				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2601 				 sizeof(__be32))
2602 
2603 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2604 {
2605 	ca->headerpadsz = 0;
2606 
2607 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2608 		return nfserr_toosmall;
2609 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2610 		return nfserr_toosmall;
2611 	ca->maxresp_cached = 0;
2612 	if (ca->maxops < 2)
2613 		return nfserr_toosmall;
2614 
2615 	return nfs_ok;
2616 }
2617 
2618 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2619 {
2620 	switch (cbs->flavor) {
2621 	case RPC_AUTH_NULL:
2622 	case RPC_AUTH_UNIX:
2623 		return nfs_ok;
2624 	default:
2625 		/*
2626 		 * GSS case: the spec doesn't allow us to return this
2627 		 * error.  But it also doesn't allow us not to support
2628 		 * GSS.
2629 		 * I'd rather this fail hard than return some error the
2630 		 * client might think it can already handle:
2631 		 */
2632 		return nfserr_encr_alg_unsupp;
2633 	}
2634 }
2635 
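/*
 * CREATE_SESSION.  The clientid must name either a confirmed or an
 * unconfirmed client; in both cases the request's sequence id is checked
 * against the client's single create_session slot.  A replay against a
 * confirmed client is answered from that slot's cache, while an
 * unconfirmed client gets seq_misordered.  A successful call on an
 * unconfirmed client also confirms it (expiring any old confirmed client
 * with the same name); the new session is then set up with its first
 * connection and the result is cached in the slot for future replays.
 */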
2636 __be32
2637 nfsd4_create_session(struct svc_rqst *rqstp,
2638 		     struct nfsd4_compound_state *cstate,
2639 		     struct nfsd4_create_session *cr_ses)
2640 {
2641 	struct sockaddr *sa = svc_addr(rqstp);
2642 	struct nfs4_client *conf, *unconf;
2643 	struct nfs4_client *old = NULL;
2644 	struct nfsd4_session *new;
2645 	struct nfsd4_conn *conn;
2646 	struct nfsd4_clid_slot *cs_slot = NULL;
2647 	__be32 status = 0;
2648 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2649 
2650 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2651 		return nfserr_inval;
2652 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2653 	if (status)
2654 		return status;
2655 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2656 	if (status)
2657 		return status;
2658 	status = check_backchannel_attrs(&cr_ses->back_channel);
2659 	if (status)
2660 		goto out_release_drc_mem;
2661 	status = nfserr_jukebox;
2662 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2663 	if (!new)
2664 		goto out_release_drc_mem;
2665 	conn = alloc_conn_from_crses(rqstp, cr_ses);
2666 	if (!conn)
2667 		goto out_free_session;
2668 
2669 	spin_lock(&nn->client_lock);
2670 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2671 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2672 	WARN_ON_ONCE(conf && unconf);
2673 
2674 	if (conf) {
2675 		status = nfserr_wrong_cred;
2676 		if (!nfsd4_mach_creds_match(conf, rqstp))
2677 			goto out_free_conn;
2678 		cs_slot = &conf->cl_cs_slot;
2679 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2680 		if (status) {
2681 			if (status == nfserr_replay_cache)
2682 				status = nfsd4_replay_create_session(cr_ses, cs_slot);
2683 			goto out_free_conn;
2684 		}
2685 	} else if (unconf) {
2686 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2687 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2688 			status = nfserr_clid_inuse;
2689 			goto out_free_conn;
2690 		}
2691 		status = nfserr_wrong_cred;
2692 		if (!nfsd4_mach_creds_match(unconf, rqstp))
2693 			goto out_free_conn;
2694 		cs_slot = &unconf->cl_cs_slot;
2695 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2696 		if (status) {
2697 			/* an unconfirmed replay returns misordered */
2698 			status = nfserr_seq_misordered;
2699 			goto out_free_conn;
2700 		}
2701 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2702 		if (old) {
2703 			status = mark_client_expired_locked(old);
2704 			if (status) {
2705 				old = NULL;
2706 				goto out_free_conn;
2707 			}
2708 		}
2709 		move_to_confirmed(unconf);
2710 		conf = unconf;
2711 	} else {
2712 		status = nfserr_stale_clientid;
2713 		goto out_free_conn;
2714 	}
2715 	status = nfs_ok;
2716 	/* Persistent sessions are not supported */
2717 	cr_ses->flags &= ~SESSION4_PERSIST;
2718 	/* Upshifting from TCP to RDMA is not supported */
2719 	cr_ses->flags &= ~SESSION4_RDMA;
2720 
2721 	init_session(rqstp, new, conf, cr_ses);
2722 	nfsd4_get_session_locked(new);
2723 
2724 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2725 	       NFS4_MAX_SESSIONID_LEN);
2726 	cs_slot->sl_seqid++;
2727 	cr_ses->seqid = cs_slot->sl_seqid;
2728 
2729 	/* cache solo and embedded create sessions under the client_lock */
2730 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
2731 	spin_unlock(&nn->client_lock);
2732 	/* init connection and backchannel */
2733 	nfsd4_init_conn(rqstp, conn, new);
2734 	nfsd4_put_session(new);
2735 	if (old)
2736 		expire_client(old);
2737 	return status;
2738 out_free_conn:
2739 	spin_unlock(&nn->client_lock);
2740 	free_conn(conn);
2741 	if (old)
2742 		expire_client(old);
2743 out_free_session:
2744 	__free_session(new);
2745 out_release_drc_mem:
2746 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
2747 	return status;
2748 }
2749 
2750 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2751 {
2752 	switch (*dir) {
2753 	case NFS4_CDFC4_FORE:
2754 	case NFS4_CDFC4_BACK:
2755 		return nfs_ok;
2756 	case NFS4_CDFC4_FORE_OR_BOTH:
2757 	case NFS4_CDFC4_BACK_OR_BOTH:
2758 		*dir = NFS4_CDFC4_BOTH;
2759 		return nfs_ok;
2760 	}
2761 	return nfserr_inval;
2762 }
2763 
2764 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2765 {
2766 	struct nfsd4_session *session = cstate->session;
2767 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2768 	__be32 status;
2769 
2770 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2771 	if (status)
2772 		return status;
2773 	spin_lock(&nn->client_lock);
2774 	session->se_cb_prog = bc->bc_cb_program;
2775 	session->se_cb_sec = bc->bc_cb_sec;
2776 	spin_unlock(&nn->client_lock);
2777 
2778 	nfsd4_probe_callback(session->se_client);
2779 
2780 	return nfs_ok;
2781 }
2782 
2783 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2784 		     struct nfsd4_compound_state *cstate,
2785 		     struct nfsd4_bind_conn_to_session *bcts)
2786 {
2787 	__be32 status;
2788 	struct nfsd4_conn *conn;
2789 	struct nfsd4_session *session;
2790 	struct net *net = SVC_NET(rqstp);
2791 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2792 
2793 	if (!nfsd4_last_compound_op(rqstp))
2794 		return nfserr_not_only_op;
2795 	spin_lock(&nn->client_lock);
2796 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2797 	spin_unlock(&nn->client_lock);
2798 	if (!session)
2799 		goto out_no_session;
2800 	status = nfserr_wrong_cred;
2801 	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2802 		goto out;
2803 	status = nfsd4_map_bcts_dir(&bcts->dir);
2804 	if (status)
2805 		goto out;
2806 	conn = alloc_conn(rqstp, bcts->dir);
2807 	status = nfserr_jukebox;
2808 	if (!conn)
2809 		goto out;
2810 	nfsd4_init_conn(rqstp, conn, session);
2811 	status = nfs_ok;
2812 out:
2813 	nfsd4_put_session(session);
2814 out_no_session:
2815 	return status;
2816 }
2817 
2818 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2819 {
2820 	if (!session)
2821 		return false;
2822 	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2823 }
2824 
2825 __be32
2826 nfsd4_destroy_session(struct svc_rqst *r,
2827 		      struct nfsd4_compound_state *cstate,
2828 		      struct nfsd4_destroy_session *sessionid)
2829 {
2830 	struct nfsd4_session *ses;
2831 	__be32 status;
2832 	int ref_held_by_me = 0;
2833 	struct net *net = SVC_NET(r);
2834 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2835 
2836 	status = nfserr_not_only_op;
2837 	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2838 		if (!nfsd4_last_compound_op(r))
2839 			goto out;
2840 		ref_held_by_me++;
2841 	}
2842 	dump_sessionid(__func__, &sessionid->sessionid);
2843 	spin_lock(&nn->client_lock);
2844 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2845 	if (!ses)
2846 		goto out_client_lock;
2847 	status = nfserr_wrong_cred;
2848 	if (!nfsd4_mach_creds_match(ses->se_client, r))
2849 		goto out_put_session;
2850 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2851 	if (status)
2852 		goto out_put_session;
2853 	unhash_session(ses);
2854 	spin_unlock(&nn->client_lock);
2855 
2856 	nfsd4_probe_callback_sync(ses->se_client);
2857 
2858 	spin_lock(&nn->client_lock);
2859 	status = nfs_ok;
2860 out_put_session:
2861 	nfsd4_put_session_locked(ses);
2862 out_client_lock:
2863 	spin_unlock(&nn->client_lock);
2864 out:
2865 	return status;
2866 }
2867 
2868 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2869 {
2870 	struct nfsd4_conn *c;
2871 
2872 	list_for_each_entry(c, &s->se_conns, cn_persession) {
2873 		if (c->cn_xprt == xpt) {
2874 			return c;
2875 		}
2876 	}
2877 	return NULL;
2878 }
2879 
2880 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2881 {
2882 	struct nfs4_client *clp = ses->se_client;
2883 	struct nfsd4_conn *c;
2884 	__be32 status = nfs_ok;
2885 	int ret;
2886 
2887 	spin_lock(&clp->cl_lock);
2888 	c = __nfsd4_find_conn(new->cn_xprt, ses);
2889 	if (c)
2890 		goto out_free;
2891 	status = nfserr_conn_not_bound_to_session;
2892 	if (clp->cl_mach_cred)
2893 		goto out_free;
2894 	__nfsd4_hash_conn(new, ses);
2895 	spin_unlock(&clp->cl_lock);
2896 	ret = nfsd4_register_conn(new);
2897 	if (ret)
2898 		/* oops; xprt is already down: */
2899 		nfsd4_conn_lost(&new->cn_xpt_user);
2900 	return nfs_ok;
2901 out_free:
2902 	spin_unlock(&clp->cl_lock);
2903 	free_conn(new);
2904 	return status;
2905 }
2906 
2907 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2908 {
2909 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
2910 
2911 	return args->opcnt > session->se_fchannel.maxops;
2912 }
2913 
2914 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2915 				  struct nfsd4_session *session)
2916 {
2917 	struct xdr_buf *xb = &rqstp->rq_arg;
2918 
2919 	return xb->len > session->se_fchannel.maxreq_sz;
2920 }
2921 
2922 __be32
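/*
 * SEQUENCE.  Must be the first op in the compound.  Validates the slot id
 * and sequence id, serving replays from the session's DRC; on a new
 * request it binds the connection to the session if needed, restricts the
 * reply buffer to the negotiated maxresp(_cached) size, bumps the slot
 * sequence id, and records the slot/session in cstate for the rest of the
 * compound.  Backchannel status flags are reported to the client on exit.
 */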
2923 nfsd4_sequence(struct svc_rqst *rqstp,
2924 	       struct nfsd4_compound_state *cstate,
2925 	       struct nfsd4_sequence *seq)
2926 {
2927 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
2928 	struct xdr_stream *xdr = &resp->xdr;
2929 	struct nfsd4_session *session;
2930 	struct nfs4_client *clp;
2931 	struct nfsd4_slot *slot;
2932 	struct nfsd4_conn *conn;
2933 	__be32 status;
2934 	int buflen;
2935 	struct net *net = SVC_NET(rqstp);
2936 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2937 
2938 	if (resp->opcnt != 1)
2939 		return nfserr_sequence_pos;
2940 
2941 	/*
2942 	 * Will be either used or freed by nfsd4_sequence_check_conn
2943 	 * below.
2944 	 */
2945 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2946 	if (!conn)
2947 		return nfserr_jukebox;
2948 
2949 	spin_lock(&nn->client_lock);
2950 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
2951 	if (!session)
2952 		goto out_no_session;
2953 	clp = session->se_client;
2954 
2955 	status = nfserr_too_many_ops;
2956 	if (nfsd4_session_too_many_ops(rqstp, session))
2957 		goto out_put_session;
2958 
2959 	status = nfserr_req_too_big;
2960 	if (nfsd4_request_too_big(rqstp, session))
2961 		goto out_put_session;
2962 
2963 	status = nfserr_badslot;
2964 	if (seq->slotid >= session->se_fchannel.maxreqs)
2965 		goto out_put_session;
2966 
2967 	slot = session->se_slots[seq->slotid];
2968 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
2969 
2970 	/* We do not negotiate the number of slots yet, so set the
2971 	 * maxslots to the session maxreqs, which is used to encode
2972 	 * sr_highest_slotid and sr_target_slotid to maxslots */
2973 	seq->maxslots = session->se_fchannel.maxreqs;
2974 
2975 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2976 					slot->sl_flags & NFSD4_SLOT_INUSE);
2977 	if (status == nfserr_replay_cache) {
2978 		status = nfserr_seq_misordered;
2979 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2980 			goto out_put_session;
2981 		cstate->slot = slot;
2982 		cstate->session = session;
2983 		cstate->clp = clp;
2984 		/* Return the cached reply status and set cstate->status
2985 		 * for nfsd4_proc_compound processing */
2986 		status = nfsd4_replay_cache_entry(resp, seq);
2987 		cstate->status = nfserr_replay_cache;
2988 		goto out;
2989 	}
2990 	if (status)
2991 		goto out_put_session;
2992 
2993 	status = nfsd4_sequence_check_conn(conn, session);
2994 	conn = NULL;
2995 	if (status)
2996 		goto out_put_session;
2997 
2998 	buflen = (seq->cachethis) ?
2999 			session->se_fchannel.maxresp_cached :
3000 			session->se_fchannel.maxresp_sz;
3001 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3002 				    nfserr_rep_too_big;
3003 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3004 		goto out_put_session;
3005 	svc_reserve(rqstp, buflen);
3006 
3007 	status = nfs_ok;
3008 	/* Success! bump slot seqid */
3009 	slot->sl_seqid = seq->seqid;
3010 	slot->sl_flags |= NFSD4_SLOT_INUSE;
3011 	if (seq->cachethis)
3012 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3013 	else
3014 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3015 
3016 	cstate->slot = slot;
3017 	cstate->session = session;
3018 	cstate->clp = clp;
3019 
3020 out:
3021 	switch (clp->cl_cb_state) {
3022 	case NFSD4_CB_DOWN:
3023 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3024 		break;
3025 	case NFSD4_CB_FAULT:
3026 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3027 		break;
3028 	default:
3029 		seq->status_flags = 0;
3030 	}
3031 	if (!list_empty(&clp->cl_revoked))
3032 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3033 out_no_session:
3034 	if (conn)
3035 		free_conn(conn);
3036 	spin_unlock(&nn->client_lock);
3037 	return status;
3038 out_put_session:
3039 	nfsd4_put_session_locked(session);
3040 	goto out_no_session;
3041 }
3042 
3043 void
3044 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3045 {
3046 	struct nfsd4_compound_state *cs = &resp->cstate;
3047 
3048 	if (nfsd4_has_session(cs)) {
3049 		if (cs->status != nfserr_replay_cache) {
3050 			nfsd4_store_cache_entry(resp);
3051 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3052 		}
3053 		/* Drop session reference that was taken in nfsd4_sequence() */
3054 		nfsd4_put_session(cs->session);
3055 	} else if (cs->clp)
3056 		put_client_renew(cs->clp);
3057 }
3058 
3059 __be32
3060 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
3061 {
3062 	struct nfs4_client *conf, *unconf;
3063 	struct nfs4_client *clp = NULL;
3064 	__be32 status = 0;
3065 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3066 
3067 	spin_lock(&nn->client_lock);
3068 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3069 	conf = find_confirmed_client(&dc->clientid, true, nn);
3070 	WARN_ON_ONCE(conf && unconf);
3071 
3072 	if (conf) {
3073 		if (client_has_state(conf)) {
3074 			status = nfserr_clientid_busy;
3075 			goto out;
3076 		}
3077 		status = mark_client_expired_locked(conf);
3078 		if (status)
3079 			goto out;
3080 		clp = conf;
3081 	} else if (unconf)
3082 		clp = unconf;
3083 	else {
3084 		status = nfserr_stale_clientid;
3085 		goto out;
3086 	}
3087 	if (!nfsd4_mach_creds_match(clp, rqstp)) {
3088 		clp = NULL;
3089 		status = nfserr_wrong_cred;
3090 		goto out;
3091 	}
3092 	unhash_client_locked(clp);
3093 out:
3094 	spin_unlock(&nn->client_lock);
3095 	if (clp)
3096 		expire_client(clp);
3097 	return status;
3098 }
3099 
3100 __be32
3101 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
3102 {
3103 	__be32 status = 0;
3104 
3105 	if (rc->rca_one_fs) {
3106 		if (!cstate->current_fh.fh_dentry)
3107 			return nfserr_nofilehandle;
3108 		/*
3109 		 * We don't take advantage of the rca_one_fs case.
3110 		 * That's OK, it's optional, we can safely ignore it.
3111 		 */
3112 		return nfs_ok;
3113 	}
3114 
3115 	status = nfserr_complete_already;
3116 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3117 			     &cstate->session->se_client->cl_flags))
3118 		goto out;
3119 
3120 	status = nfserr_stale_clientid;
3121 	if (is_client_expired(cstate->session->se_client))
3122 		/*
3123 		 * The following error isn't really legal.
3124 		 * But we only get here if the client has just explicitly
3125 		 * destroyed itself.  Surely it no longer cares what
3126 		 * error it gets back on an operation for the dead
3127 		 * client.
3128 		 */
3129 		goto out;
3130 
3131 	status = nfs_ok;
3132 	nfsd4_client_record_create(cstate->session->se_client);
3133 out:
3134 	return status;
3135 }
3136 
3137 __be32
3138 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3139 		  struct nfsd4_setclientid *setclid)
3140 {
3141 	struct xdr_netobj 	clname = setclid->se_name;
3142 	nfs4_verifier		clverifier = setclid->se_verf;
3143 	struct nfs4_client	*conf, *new;
3144 	struct nfs4_client	*unconf = NULL;
3145 	__be32 			status;
3146 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3147 
3148 	new = create_client(clname, rqstp, &clverifier);
3149 	if (new == NULL)
3150 		return nfserr_jukebox;
3151 	/* Cases below refer to rfc 3530 section 14.2.33: */
3152 	spin_lock(&nn->client_lock);
3153 	conf = find_confirmed_client_by_name(&clname, nn);
3154 	if (conf && client_has_state(conf)) {
3155 		/* case 0: */
3156 		status = nfserr_clid_inuse;
3157 		if (clp_used_exchangeid(conf))
3158 			goto out;
3159 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3160 			char addr_str[INET6_ADDRSTRLEN];
3161 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3162 				 sizeof(addr_str));
3163 			dprintk("NFSD: setclientid: string in use by client "
3164 				"at %s\n", addr_str);
3165 			goto out;
3166 		}
3167 	}
3168 	unconf = find_unconfirmed_client_by_name(&clname, nn);
3169 	if (unconf)
3170 		unhash_client_locked(unconf);
3171 	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3172 		/* case 1: probable callback update */
3173 		copy_clid(new, conf);
3174 		gen_confirm(new, nn);
3175 	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
3176 		gen_clid(new, nn);
3177 	new->cl_minorversion = 0;
3178 	gen_callback(new, setclid, rqstp);
3179 	add_to_unconfirmed(new);
3180 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3181 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3182 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3183 	new = NULL;
3184 	status = nfs_ok;
3185 out:
3186 	spin_unlock(&nn->client_lock);
3187 	if (new)
3188 		free_client(new);
3189 	if (unconf)
3190 		expire_client(unconf);
3191 	return status;
3192 }
3193 
3194 
3195 __be32
3196 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3197 			 struct nfsd4_compound_state *cstate,
3198 			 struct nfsd4_setclientid_confirm *setclientid_confirm)
3199 {
3200 	struct nfs4_client *conf, *unconf;
3201 	struct nfs4_client *old = NULL;
3202 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3203 	clientid_t * clid = &setclientid_confirm->sc_clientid;
3204 	__be32 status;
3205 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3206 
3207 	if (STALE_CLIENTID(clid, nn))
3208 		return nfserr_stale_clientid;
3209 
3210 	spin_lock(&nn->client_lock);
3211 	conf = find_confirmed_client(clid, false, nn);
3212 	unconf = find_unconfirmed_client(clid, false, nn);
3213 	/*
3214 	 * We try hard to give out unique clientids, so if we get an
3215 	 * attempt to confirm the same clientid with a different cred,
3216 	 * the client may be buggy; this should never happen.
3217 	 *
3218 	 * Nevertheless, RFC 7530 recommends INUSE for this case:
3219 	 */
3220 	status = nfserr_clid_inuse;
3221 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3222 		goto out;
3223 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3224 		goto out;
3225 	/* cases below refer to rfc 3530 section 14.2.34: */
3226 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3227 		if (conf && !unconf) /* case 2: probable retransmit */
3228 			status = nfs_ok;
3229 		else /* case 4: client hasn't noticed we rebooted yet? */
3230 			status = nfserr_stale_clientid;
3231 		goto out;
3232 	}
3233 	status = nfs_ok;
3234 	if (conf) { /* case 1: callback update */
3235 		old = unconf;
3236 		unhash_client_locked(old);
3237 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3238 	} else { /* case 3: normal case; new or rebooted client */
3239 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3240 		if (old) {
3241 			status = nfserr_clid_inuse;
3242 			if (client_has_state(old)
3243 					&& !same_creds(&unconf->cl_cred,
3244 							&old->cl_cred))
3245 				goto out;
3246 			status = mark_client_expired_locked(old);
3247 			if (status) {
3248 				old = NULL;
3249 				goto out;
3250 			}
3251 		}
3252 		move_to_confirmed(unconf);
3253 		conf = unconf;
3254 	}
3255 	get_client_locked(conf);
3256 	spin_unlock(&nn->client_lock);
3257 	nfsd4_probe_callback(conf);
3258 	spin_lock(&nn->client_lock);
3259 	put_client_renew_locked(conf);
3260 out:
3261 	spin_unlock(&nn->client_lock);
3262 	if (old)
3263 		expire_client(old);
3264 	return status;
3265 }
3266 
3267 static struct nfs4_file *nfsd4_alloc_file(void)
3268 {
3269 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
3270 }
3271 
3272 /* OPEN Share state helper functions */
3273 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3274 				struct nfs4_file *fp)
3275 {
3276 	lockdep_assert_held(&state_lock);
3277 
3278 	atomic_set(&fp->fi_ref, 1);
3279 	spin_lock_init(&fp->fi_lock);
3280 	INIT_LIST_HEAD(&fp->fi_stateids);
3281 	INIT_LIST_HEAD(&fp->fi_delegations);
3282 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3283 	fh_copy_shallow(&fp->fi_fhandle, fh);
3284 	fp->fi_deleg_file = NULL;
3285 	fp->fi_had_conflict = false;
3286 	fp->fi_share_deny = 0;
3287 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3288 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
3289 #ifdef CONFIG_NFSD_PNFS
3290 	INIT_LIST_HEAD(&fp->fi_lo_states);
3291 	atomic_set(&fp->fi_lo_recalls, 0);
3292 #endif
3293 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3294 }
3295 
3296 void
3297 nfsd4_free_slabs(void)
3298 {
3299 	kmem_cache_destroy(odstate_slab);
3300 	kmem_cache_destroy(openowner_slab);
3301 	kmem_cache_destroy(lockowner_slab);
3302 	kmem_cache_destroy(file_slab);
3303 	kmem_cache_destroy(stateid_slab);
3304 	kmem_cache_destroy(deleg_slab);
3305 }
3306 
3307 int
3308 nfsd4_init_slabs(void)
3309 {
3310 	openowner_slab = kmem_cache_create("nfsd4_openowners",
3311 			sizeof(struct nfs4_openowner), 0, 0, NULL);
3312 	if (openowner_slab == NULL)
3313 		goto out;
3314 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3315 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
3316 	if (lockowner_slab == NULL)
3317 		goto out_free_openowner_slab;
3318 	file_slab = kmem_cache_create("nfsd4_files",
3319 			sizeof(struct nfs4_file), 0, 0, NULL);
3320 	if (file_slab == NULL)
3321 		goto out_free_lockowner_slab;
3322 	stateid_slab = kmem_cache_create("nfsd4_stateids",
3323 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3324 	if (stateid_slab == NULL)
3325 		goto out_free_file_slab;
3326 	deleg_slab = kmem_cache_create("nfsd4_delegations",
3327 			sizeof(struct nfs4_delegation), 0, 0, NULL);
3328 	if (deleg_slab == NULL)
3329 		goto out_free_stateid_slab;
3330 	odstate_slab = kmem_cache_create("nfsd4_odstate",
3331 			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3332 	if (odstate_slab == NULL)
3333 		goto out_free_deleg_slab;
3334 	return 0;
3335 
3336 out_free_deleg_slab:
3337 	kmem_cache_destroy(deleg_slab);
3338 out_free_stateid_slab:
3339 	kmem_cache_destroy(stateid_slab);
3340 out_free_file_slab:
3341 	kmem_cache_destroy(file_slab);
3342 out_free_lockowner_slab:
3343 	kmem_cache_destroy(lockowner_slab);
3344 out_free_openowner_slab:
3345 	kmem_cache_destroy(openowner_slab);
3346 out:
3347 	dprintk("nfsd4: out of memory while initializing nfsv4\n");
3348 	return -ENOMEM;
3349 }
3350 
3351 static void init_nfs4_replay(struct nfs4_replay *rp)
3352 {
3353 	rp->rp_status = nfserr_serverfault;
3354 	rp->rp_buflen = 0;
3355 	rp->rp_buf = rp->rp_ibuf;
3356 	mutex_init(&rp->rp_mutex);
3357 }
3358 
3359 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3360 		struct nfs4_stateowner *so)
3361 {
3362 	if (!nfsd4_has_session(cstate)) {
3363 		mutex_lock(&so->so_replay.rp_mutex);
3364 		cstate->replay_owner = nfs4_get_stateowner(so);
3365 	}
3366 }
3367 
3368 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3369 {
3370 	struct nfs4_stateowner *so = cstate->replay_owner;
3371 
3372 	if (so != NULL) {
3373 		cstate->replay_owner = NULL;
3374 		mutex_unlock(&so->so_replay.rp_mutex);
3375 		nfs4_put_stateowner(so);
3376 	}
3377 }
3378 
3379 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3380 {
3381 	struct nfs4_stateowner *sop;
3382 
3383 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
3384 	if (!sop)
3385 		return NULL;
3386 
3387 	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3388 	if (!sop->so_owner.data) {
3389 		kmem_cache_free(slab, sop);
3390 		return NULL;
3391 	}
3392 	sop->so_owner.len = owner->len;
3393 
3394 	INIT_LIST_HEAD(&sop->so_stateids);
3395 	sop->so_client = clp;
3396 	init_nfs4_replay(&sop->so_replay);
3397 	atomic_set(&sop->so_count, 1);
3398 	return sop;
3399 }
3400 
3401 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3402 {
3403 	lockdep_assert_held(&clp->cl_lock);
3404 
3405 	list_add(&oo->oo_owner.so_strhash,
3406 		 &clp->cl_ownerstr_hashtbl[strhashval]);
3407 	list_add(&oo->oo_perclient, &clp->cl_openowners);
3408 }
3409 
3410 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3411 {
3412 	unhash_openowner_locked(openowner(so));
3413 }
3414 
3415 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3416 {
3417 	struct nfs4_openowner *oo = openowner(so);
3418 
3419 	kmem_cache_free(openowner_slab, oo);
3420 }
3421 
3422 static const struct nfs4_stateowner_operations openowner_ops = {
3423 	.so_unhash =	nfs4_unhash_openowner,
3424 	.so_free =	nfs4_free_openowner,
3425 };
3426 
3427 static struct nfs4_ol_stateid *
3428 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3429 {
3430 	struct nfs4_ol_stateid *local, *ret = NULL;
3431 	struct nfs4_openowner *oo = open->op_openowner;
3432 
3433 	lockdep_assert_held(&fp->fi_lock);
3434 
3435 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3436 		/* ignore lock owners */
3437 		if (local->st_stateowner->so_is_open_owner == 0)
3438 			continue;
3439 		if (local->st_stateowner == &oo->oo_owner) {
3440 			ret = local;
3441 			atomic_inc(&ret->st_stid.sc_count);
3442 			break;
3443 		}
3444 	}
3445 	return ret;
3446 }
3447 
3448 static struct nfs4_openowner *
3449 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3450 			   struct nfsd4_compound_state *cstate)
3451 {
3452 	struct nfs4_client *clp = cstate->clp;
3453 	struct nfs4_openowner *oo, *ret;
3454 
3455 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3456 	if (!oo)
3457 		return NULL;
3458 	oo->oo_owner.so_ops = &openowner_ops;
3459 	oo->oo_owner.so_is_open_owner = 1;
3460 	oo->oo_owner.so_seqid = open->op_seqid;
3461 	oo->oo_flags = 0;
3462 	if (nfsd4_has_session(cstate))
3463 		oo->oo_flags |= NFS4_OO_CONFIRMED;
3464 	oo->oo_time = 0;
3465 	oo->oo_last_closed_stid = NULL;
3466 	INIT_LIST_HEAD(&oo->oo_close_lru);
3467 	spin_lock(&clp->cl_lock);
3468 	ret = find_openstateowner_str_locked(strhashval, open, clp);
3469 	if (ret == NULL) {
3470 		hash_openowner(oo, clp, strhashval);
3471 		ret = oo;
3472 	} else
3473 		nfs4_free_stateowner(&oo->oo_owner);
3474 
3475 	spin_unlock(&clp->cl_lock);
3476 	return ret;
3477 }
3478 
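/*
 * Initialize the preallocated open stateid in open->op_stp and hash it
 * under the client and file locks.  If an open stateid for this
 * (openowner, file) pair already exists (e.g. because another nfsd thread
 * raced us), return that one instead, with its st_mutex held, and leave
 * open->op_stp for the caller to free.
 */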
3479 static struct nfs4_ol_stateid *
3480 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3481 {
3482 
3483 	struct nfs4_openowner *oo = open->op_openowner;
3484 	struct nfs4_ol_stateid *retstp = NULL;
3485 	struct nfs4_ol_stateid *stp;
3486 
3487 	stp = open->op_stp;
3488 	/*
	 * Take st_mutex before the spinlocks: mutex_lock() can sleep, so it
	 * must not be called while holding cl_lock or fi_lock.
	 */
3489 	mutex_init(&stp->st_mutex);
3490 	mutex_lock(&stp->st_mutex);
3491 
3492 	spin_lock(&oo->oo_owner.so_client->cl_lock);
3493 	spin_lock(&fp->fi_lock);
3494 
3495 	retstp = nfsd4_find_existing_open(fp, open);
3496 	if (retstp)
3497 		goto out_unlock;
3498 
3499 	open->op_stp = NULL;
3500 	atomic_inc(&stp->st_stid.sc_count);
3501 	stp->st_stid.sc_type = NFS4_OPEN_STID;
3502 	INIT_LIST_HEAD(&stp->st_locks);
3503 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3504 	get_nfs4_file(fp);
3505 	stp->st_stid.sc_file = fp;
3506 	stp->st_access_bmap = 0;
3507 	stp->st_deny_bmap = 0;
3508 	stp->st_openstp = NULL;
3509 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3510 	list_add(&stp->st_perfile, &fp->fi_stateids);
3511 
3512 out_unlock:
3513 	spin_unlock(&fp->fi_lock);
3514 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
3515 	if (retstp) {
3516 		mutex_lock(&retstp->st_mutex);
3517 		/* To keep mutex tracking happy */
3518 		mutex_unlock(&stp->st_mutex);
3519 		stp = retstp;
3520 	}
3521 	return stp;
3522 }
3523 
3524 /*
3525  * In the 4.0 case we need to keep the owners around a little while to handle
3526  * CLOSE replay. We still do need to release any file access that is held by
3527  * them before returning however.
3528  */
3529 static void
3530 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3531 {
3532 	struct nfs4_ol_stateid *last;
3533 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
3534 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3535 						nfsd_net_id);
3536 
3537 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3538 
3539 	/*
3540 	 * We know that we hold one reference via nfsd4_close, and another
3541 	 * "persistent" reference for the client. If the refcount is higher
3542 	 * than 2, then there are still calls in progress that are using this
3543 	 * stateid. We can't put the sc_file reference until they are finished.
3544 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
3545 	 * there should be no danger of the refcount going back up again at
3546 	 * this point.
3547 	 */
3548 	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3549 
3550 	release_all_access(s);
3551 	if (s->st_stid.sc_file) {
3552 		put_nfs4_file(s->st_stid.sc_file);
3553 		s->st_stid.sc_file = NULL;
3554 	}
3555 
3556 	spin_lock(&nn->client_lock);
3557 	last = oo->oo_last_closed_stid;
3558 	oo->oo_last_closed_stid = s;
3559 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3560 	oo->oo_time = get_seconds();
3561 	spin_unlock(&nn->client_lock);
3562 	if (last)
3563 		nfs4_put_stid(&last->st_stid);
3564 }
3565 
3566 /* search file_hashtbl[] for file */
3567 static struct nfs4_file *
3568 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3569 {
3570 	struct nfs4_file *fp;
3571 
3572 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3573 		if (fh_match(&fp->fi_fhandle, fh)) {
3574 			if (atomic_inc_not_zero(&fp->fi_ref))
3575 				return fp;
3576 		}
3577 	}
3578 	return NULL;
3579 }
3580 
3581 struct nfs4_file *
3582 find_file(struct knfsd_fh *fh)
3583 {
3584 	struct nfs4_file *fp;
3585 	unsigned int hashval = file_hashval(fh);
3586 
3587 	rcu_read_lock();
3588 	fp = find_file_locked(fh, hashval);
3589 	rcu_read_unlock();
3590 	return fp;
3591 }
3592 
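/*
 * Look up an nfs4_file by filehandle, inserting the caller's preallocated
 * "new" entry if none exists.  The lookup is tried locklessly under RCU
 * first; only on a miss do we take state_lock and re-check before
 * inserting, so the common case never touches the lock.
 */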
3593 static struct nfs4_file *
3594 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3595 {
3596 	struct nfs4_file *fp;
3597 	unsigned int hashval = file_hashval(fh);
3598 
3599 	rcu_read_lock();
3600 	fp = find_file_locked(fh, hashval);
3601 	rcu_read_unlock();
3602 	if (fp)
3603 		return fp;
3604 
3605 	spin_lock(&state_lock);
3606 	fp = find_file_locked(fh, hashval);
3607 	if (likely(fp == NULL)) {
3608 		nfsd4_init_file(fh, hashval, new);
3609 		fp = new;
3610 	}
3611 	spin_unlock(&state_lock);
3612 
3613 	return fp;
3614 }
3615 
3616 /*
3617  * Called to check for a deny-mode conflict when a READ is done with an
3618  * all-zero stateid, or a WRITE with an all-zero or all-ones stateid.
3619  */
3620 static __be32
3621 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3622 {
3623 	struct nfs4_file *fp;
3624 	__be32 ret = nfs_ok;
3625 
3626 	fp = find_file(&current_fh->fh_handle);
3627 	if (!fp)
3628 		return ret;
3629 	/* Check for conflicting share reservations */
3630 	spin_lock(&fp->fi_lock);
3631 	if (fp->fi_share_deny & deny_type)
3632 		ret = nfserr_locked;
3633 	spin_unlock(&fp->fi_lock);
3634 	put_nfs4_file(fp);
3635 	return ret;
3636 }
3637 
3638 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3639 {
3640 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3641 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3642 					  nfsd_net_id);
3643 
3644 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3645 
3646 	/*
3647 	 * We can't do this in nfsd_break_deleg_cb because it is
3648 	 * already holding inode->i_lock.
3649 	 *
3650 	 * If the dl_time != 0, then we know that it has already been
3651 	 * queued for a lease break. Don't queue it again.
3652 	 */
3653 	spin_lock(&state_lock);
3654 	if (dp->dl_time == 0) {
3655 		dp->dl_time = get_seconds();
3656 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3657 	}
3658 	spin_unlock(&state_lock);
3659 }
3660 
3661 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3662 		struct rpc_task *task)
3663 {
3664 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3665 
3666 	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3667 		return 1;
3668 
3669 	switch (task->tk_status) {
3670 	case 0:
3671 		return 1;
3672 	case -EBADHANDLE:
3673 	case -NFS4ERR_BAD_STATEID:
3674 		/*
3675 		 * Race: client probably got cb_recall before open reply
3676 		 * granting delegation.
3677 		 */
3678 		if (dp->dl_retries--) {
3679 			rpc_delay(task, 2 * HZ);
3680 			return 0;
3681 		}
3682 		/*FALLTHRU*/
3683 	default:
3684 		return -1;
3685 	}
3686 }
3687 
3688 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3689 {
3690 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3691 
3692 	nfs4_put_stid(&dp->dl_stid);
3693 }
3694 
3695 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3696 	.prepare	= nfsd4_cb_recall_prepare,
3697 	.done		= nfsd4_cb_recall_done,
3698 	.release	= nfsd4_cb_recall_release,
3699 };
3700 
3701 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3702 {
3703 	/*
3704 	 * We're assuming the state code never drops its reference
3705 	 * without first removing the lease.  Since we're in this lease
3706 	 * callback (and since the lease code is serialized by the kernel
3707 	 * lock) we know the server hasn't removed the lease yet, so it's
3708 	 * safe to take a reference.
3709 	 */
3710 	atomic_inc(&dp->dl_stid.sc_count);
3711 	nfsd4_run_cb(&dp->dl_recall);
3712 }
3713 
3714 /* Called from break_lease() with i_lock held. */
3715 static bool
3716 nfsd_break_deleg_cb(struct file_lock *fl)
3717 {
3718 	bool ret = false;
3719 	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3720 	struct nfs4_delegation *dp;
3721 
3722 	if (!fp) {
3723 		WARN(1, "(%p)->fl_owner NULL\n", fl);
3724 		return ret;
3725 	}
3726 	if (fp->fi_had_conflict) {
3727 		WARN(1, "duplicate break on %p\n", fp);
3728 		return ret;
3729 	}
3730 	/*
3731 	 * We don't want the locks code to time out the lease for us;
3732 	 * we'll remove it ourselves if a delegation isn't returned
3733 	 * in time:
3734 	 */
3735 	fl->fl_break_time = 0;
3736 
3737 	spin_lock(&fp->fi_lock);
3738 	fp->fi_had_conflict = true;
3739 	/*
3740 	 * If there are no delegations on the list, then return true
3741 	 * so that the lease code will go ahead and delete it.
3742 	 */
3743 	if (list_empty(&fp->fi_delegations))
3744 		ret = true;
3745 	else
3746 		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3747 			nfsd_break_one_deleg(dp);
3748 	spin_unlock(&fp->fi_lock);
3749 	return ret;
3750 }
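/*
 * nfsd_break_deleg_cb() returns true only when fi_delegations is empty,
 * which tells the locks code to go ahead and delete the lease itself;
 * otherwise it queues a CB_RECALL for every delegation via
 * nfsd_break_one_deleg() and clears fl_break_time so that expiring an
 * unreturned delegation is left to nfsd (the laundromat) rather than to
 * the VFS lease-break timeout.
 */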
3751 
3752 static int
3753 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3754 		     struct list_head *dispose)
3755 {
3756 	if (arg & F_UNLCK)
3757 		return lease_modify(onlist, arg, dispose);
3758 	else
3759 		return -EAGAIN;
3760 }
3761 
3762 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3763 	.lm_break = nfsd_break_deleg_cb,
3764 	.lm_change = nfsd_change_deleg_cb,
3765 };
3766 
3767 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3768 {
3769 	if (nfsd4_has_session(cstate))
3770 		return nfs_ok;
3771 	if (seqid == so->so_seqid - 1)
3772 		return nfserr_replay_me;
3773 	if (seqid == so->so_seqid)
3774 		return nfs_ok;
3775 	return nfserr_bad_seqid;
3776 }
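/*
 * For NFSv4.0 (no session) the stateowner seqid is checked strictly: a
 * seqid equal to so_seqid is the next request in order, so_seqid - 1 is
 * treated as a replay of the previous request (nfserr_replay_me), and
 * anything else is nfserr_bad_seqid.  Sessions (v4.1+) have their own
 * replay cache, so the check is skipped entirely.
 */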
3777 
3778 static __be32 lookup_clientid(clientid_t *clid,
3779 		struct nfsd4_compound_state *cstate,
3780 		struct nfsd_net *nn)
3781 {
3782 	struct nfs4_client *found;
3783 
3784 	if (cstate->clp) {
3785 		found = cstate->clp;
3786 		if (!same_clid(&found->cl_clientid, clid))
3787 			return nfserr_stale_clientid;
3788 		return nfs_ok;
3789 	}
3790 
3791 	if (STALE_CLIENTID(clid, nn))
3792 		return nfserr_stale_clientid;
3793 
3794 	/*
3795 	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3796 	 * cached already then we know this is for v4.0 and "sessions"
3797 	 * will be false.
3798 	 */
3799 	WARN_ON_ONCE(cstate->session);
3800 	spin_lock(&nn->client_lock);
3801 	found = find_confirmed_client(clid, false, nn);
3802 	if (!found) {
3803 		spin_unlock(&nn->client_lock);
3804 		return nfserr_expired;
3805 	}
3806 	atomic_inc(&found->cl_refcount);
3807 	spin_unlock(&nn->client_lock);
3808 
3809 	/* Cache the nfs4_client in cstate! */
3810 	cstate->clp = found;
3811 	return nfs_ok;
3812 }
3813 
3814 __be32
3815 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3816 		    struct nfsd4_open *open, struct nfsd_net *nn)
3817 {
3818 	clientid_t *clientid = &open->op_clientid;
3819 	struct nfs4_client *clp = NULL;
3820 	unsigned int strhashval;
3821 	struct nfs4_openowner *oo = NULL;
3822 	__be32 status;
3823 
3824 	if (STALE_CLIENTID(&open->op_clientid, nn))
3825 		return nfserr_stale_clientid;
3826 	/*
3827 	 * In case we need it later, after we've already created the
3828 	 * file and don't want to risk a further failure:
3829 	 */
3830 	open->op_file = nfsd4_alloc_file();
3831 	if (open->op_file == NULL)
3832 		return nfserr_jukebox;
3833 
3834 	status = lookup_clientid(clientid, cstate, nn);
3835 	if (status)
3836 		return status;
3837 	clp = cstate->clp;
3838 
3839 	strhashval = ownerstr_hashval(&open->op_owner);
3840 	oo = find_openstateowner_str(strhashval, open, clp);
3841 	open->op_openowner = oo;
3842 	if (!oo)
3843 		goto new_owner;
3845 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
3846 		/* Replace unconfirmed owners without checking for replay. */
3847 		release_openowner(oo);
3848 		open->op_openowner = NULL;
3849 		goto new_owner;
3850 	}
3851 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
3852 	if (status)
3853 		return status;
3854 	goto alloc_stateid;
3855 new_owner:
3856 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
3857 	if (oo == NULL)
3858 		return nfserr_jukebox;
3859 	open->op_openowner = oo;
3860 alloc_stateid:
3861 	open->op_stp = nfs4_alloc_open_stateid(clp);
3862 	if (!open->op_stp)
3863 		return nfserr_jukebox;
3864 
3865 	if (nfsd4_has_session(cstate) &&
3866 	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
3867 		open->op_odstate = alloc_clnt_odstate(clp);
3868 		if (!open->op_odstate)
3869 			return nfserr_jukebox;
3870 	}
3871 
3872 	return nfs_ok;
3873 }
3874 
3875 static inline __be32
3876 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3877 {
3878 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3879 		return nfserr_openmode;
3880 	else
3881 		return nfs_ok;
3882 }
3883 
3884 static int share_access_to_flags(u32 share_access)
3885 {
3886 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3887 }
3888 
3889 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3890 {
3891 	struct nfs4_stid *ret;
3892 
3893 	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3894 	if (!ret)
3895 		return NULL;
3896 	return delegstateid(ret);
3897 }
3898 
3899 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3900 {
3901 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3902 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3903 }
3904 
3905 static __be32
3906 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3907 		struct nfs4_delegation **dp)
3908 {
3909 	int flags;
3910 	__be32 status = nfserr_bad_stateid;
3911 	struct nfs4_delegation *deleg;
3912 
3913 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
3914 	if (deleg == NULL)
3915 		goto out;
3916 	flags = share_access_to_flags(open->op_share_access);
3917 	status = nfs4_check_delegmode(deleg, flags);
3918 	if (status) {
3919 		nfs4_put_stid(&deleg->dl_stid);
3920 		goto out;
3921 	}
3922 	*dp = deleg;
3923 out:
3924 	if (!nfsd4_is_deleg_cur(open))
3925 		return nfs_ok;
3926 	if (status)
3927 		return status;
3928 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3929 	return nfs_ok;
3930 }
3931 
3932 static inline int nfs4_access_to_access(u32 nfs4_access)
3933 {
3934 	int flags = 0;
3935 
3936 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3937 		flags |= NFSD_MAY_READ;
3938 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3939 		flags |= NFSD_MAY_WRITE;
3940 	return flags;
3941 }
3942 
3943 static inline __be32
3944 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3945 		struct nfsd4_open *open)
3946 {
3947 	struct iattr iattr = {
3948 		.ia_valid = ATTR_SIZE,
3949 		.ia_size = 0,
3950 	};
3951 	if (!open->op_truncate)
3952 		return 0;
3953 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3954 		return nfserr_inval;
3955 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3956 }
3957 
3958 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3959 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
3960 		struct nfsd4_open *open)
3961 {
3962 	struct file *filp = NULL;
3963 	__be32 status;
3964 	int oflag = nfs4_access_to_omode(open->op_share_access);
3965 	int access = nfs4_access_to_access(open->op_share_access);
3966 	unsigned char old_access_bmap, old_deny_bmap;
3967 
3968 	spin_lock(&fp->fi_lock);
3969 
3970 	/*
3971 	 * Are we trying to set a deny mode that would conflict with
3972 	 * current access?
3973 	 */
3974 	status = nfs4_file_check_deny(fp, open->op_share_deny);
3975 	if (status != nfs_ok) {
3976 		spin_unlock(&fp->fi_lock);
3977 		goto out;
3978 	}
3979 
3980 	/* set access to the file */
3981 	status = nfs4_file_get_access(fp, open->op_share_access);
3982 	if (status != nfs_ok) {
3983 		spin_unlock(&fp->fi_lock);
3984 		goto out;
3985 	}
3986 
3987 	/* Set access bits in stateid */
3988 	old_access_bmap = stp->st_access_bmap;
3989 	set_access(open->op_share_access, stp);
3990 
3991 	/* Set new deny mask */
3992 	old_deny_bmap = stp->st_deny_bmap;
3993 	set_deny(open->op_share_deny, stp);
3994 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3995 
3996 	if (!fp->fi_fds[oflag]) {
3997 		spin_unlock(&fp->fi_lock);
3998 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
3999 		if (status)
4000 			goto out_put_access;
4001 		spin_lock(&fp->fi_lock);
4002 		if (!fp->fi_fds[oflag]) {
4003 			fp->fi_fds[oflag] = filp;
4004 			filp = NULL;
4005 		}
4006 	}
4007 	spin_unlock(&fp->fi_lock);
4008 	if (filp)
4009 		fput(filp);
4010 
4011 	status = nfsd4_truncate(rqstp, cur_fh, open);
4012 	if (status)
4013 		goto out_put_access;
4014 out:
4015 	return status;
4016 out_put_access:
4017 	stp->st_access_bmap = old_access_bmap;
4018 	nfs4_file_put_access(fp, open->op_share_access);
4019 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4020 	goto out;
4021 }
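/*
 * Note that fi_lock is dropped around the nfsd_open() call above, since
 * opening the file may sleep.  After the lock is retaken, fi_fds[oflag]
 * is re-checked: if another thread installed a struct file in the
 * meantime, the extra one obtained here is released with fput() once the
 * lock is dropped again.  On any later failure, out_put_access restores
 * the old access and deny bitmaps and drops the access reference taken.
 */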
4022 
4023 static __be32
4024 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4025 {
4026 	__be32 status;
4027 	unsigned char old_deny_bmap = stp->st_deny_bmap;
4028 
4029 	if (!test_access(open->op_share_access, stp))
4030 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4031 
4032 	/* test and set deny mode */
4033 	spin_lock(&fp->fi_lock);
4034 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4035 	if (status == nfs_ok) {
4036 		set_deny(open->op_share_deny, stp);
4037 		fp->fi_share_deny |=
4038 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4039 	}
4040 	spin_unlock(&fp->fi_lock);
4041 
4042 	if (status != nfs_ok)
4043 		return status;
4044 
4045 	status = nfsd4_truncate(rqstp, cur_fh, open);
4046 	if (status != nfs_ok)
4047 		reset_union_bmap_deny(old_deny_bmap, stp);
4048 	return status;
4049 }
4050 
4051 /* Should we give out recallable state? */
4052 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4053 {
4054 	if (clp->cl_cb_state == NFSD4_CB_UP)
4055 		return true;
4056 	/*
4057 	 * In the sessions case, since we don't have to establish a
4058 	 * separate connection for callbacks, we assume it's OK
4059 	 * until we hear otherwise:
4060 	 */
4061 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4062 }
4063 
4064 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4065 {
4066 	struct file_lock *fl;
4067 
4068 	fl = locks_alloc_lock();
4069 	if (!fl)
4070 		return NULL;
4071 	fl->fl_lmops = &nfsd_lease_mng_ops;
4072 	fl->fl_flags = FL_DELEG;
4073 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4074 	fl->fl_end = OFFSET_MAX;
4075 	fl->fl_owner = (fl_owner_t)fp;
4076 	fl->fl_pid = current->tgid;
4077 	return fl;
4078 }
4079 
4080 /**
4081  * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4082  * @dp:   a pointer to the nfs4_delegation we're adding.
4083  *
4084  * Return:
4085  *      %0 on success.
4086  *
4087  *      %-EAGAIN if there was an existing delegation;
4088  *      another nonzero error code on any other failure.
4089  *
4090  */
4091 
4092 static int nfs4_setlease(struct nfs4_delegation *dp)
4093 {
4094 	struct nfs4_file *fp = dp->dl_stid.sc_file;
4095 	struct file_lock *fl;
4096 	struct file *filp;
4097 	int status = 0;
4098 
4099 	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
4100 	if (!fl)
4101 		return -ENOMEM;
4102 	filp = find_readable_file(fp);
4103 	if (!filp) {
4104 		/* We should always have a readable file here */
4105 		WARN_ON_ONCE(1);
4106 		locks_free_lock(fl);
4107 		return -EBADF;
4108 	}
4109 	fl->fl_file = filp;
4110 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
4111 	if (fl)
4112 		locks_free_lock(fl);
4113 	if (status)
4114 		goto out_fput;
4115 	spin_lock(&state_lock);
4116 	spin_lock(&fp->fi_lock);
4117 	/* Did the lease get broken before we took the lock? */
4118 	status = -EAGAIN;
4119 	if (fp->fi_had_conflict)
4120 		goto out_unlock;
4121 	/* Race breaker */
4122 	if (fp->fi_deleg_file) {
4123 		status = hash_delegation_locked(dp, fp);
4124 		goto out_unlock;
4125 	}
4126 	fp->fi_deleg_file = filp;
4127 	fp->fi_delegees = 0;
4128 	status = hash_delegation_locked(dp, fp);
4129 	spin_unlock(&fp->fi_lock);
4130 	spin_unlock(&state_lock);
4131 	if (status) {
4132 		/* Should never happen, since this is a new fi_deleg_file */
4133 		WARN_ON_ONCE(1);
4134 		goto out_fput;
4135 	}
4136 	return 0;
4137 out_unlock:
4138 	spin_unlock(&fp->fi_lock);
4139 	spin_unlock(&state_lock);
4140 out_fput:
4141 	fput(filp);
4142 	return status;
4143 }
4144 
4145 static struct nfs4_delegation *
4146 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4147 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4148 {
4149 	int status;
4150 	struct nfs4_delegation *dp;
4151 
4152 	if (fp->fi_had_conflict)
4153 		return ERR_PTR(-EAGAIN);
4154 
4155 	spin_lock(&state_lock);
4156 	spin_lock(&fp->fi_lock);
4157 	status = nfs4_get_existing_delegation(clp, fp);
4158 	spin_unlock(&fp->fi_lock);
4159 	spin_unlock(&state_lock);
4160 
4161 	if (status)
4162 		return ERR_PTR(status);
4163 
4164 	dp = alloc_init_deleg(clp, fh, odstate);
4165 	if (!dp)
4166 		return ERR_PTR(-ENOMEM);
4167 
4168 	get_nfs4_file(fp);
4169 	spin_lock(&state_lock);
4170 	spin_lock(&fp->fi_lock);
4171 	dp->dl_stid.sc_file = fp;
4172 	if (!fp->fi_deleg_file) {
4173 		spin_unlock(&fp->fi_lock);
4174 		spin_unlock(&state_lock);
4175 		status = nfs4_setlease(dp);
4176 		goto out;
4177 	}
4178 	if (fp->fi_had_conflict) {
4179 		status = -EAGAIN;
4180 		goto out_unlock;
4181 	}
4182 	status = hash_delegation_locked(dp, fp);
4183 out_unlock:
4184 	spin_unlock(&fp->fi_lock);
4185 	spin_unlock(&state_lock);
4186 out:
4187 	if (status) {
4188 		put_clnt_odstate(dp->dl_clnt_odstate);
4189 		nfs4_put_stid(&dp->dl_stid);
4190 		return ERR_PTR(status);
4191 	}
4192 	return dp;
4193 }
4194 
4195 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4196 {
4197 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4198 	if (status == -EAGAIN)
4199 		open->op_why_no_deleg = WND4_CONTENTION;
4200 	else {
4201 		open->op_why_no_deleg = WND4_RESOURCE;
4202 		switch (open->op_deleg_want) {
4203 		case NFS4_SHARE_WANT_READ_DELEG:
4204 		case NFS4_SHARE_WANT_WRITE_DELEG:
4205 		case NFS4_SHARE_WANT_ANY_DELEG:
4206 			break;
4207 		case NFS4_SHARE_WANT_CANCEL:
4208 			open->op_why_no_deleg = WND4_CANCELLED;
4209 			break;
4210 		case NFS4_SHARE_WANT_NO_DELEG:
4211 			WARN_ON_ONCE(1);
4212 		}
4213 	}
4214 }
4215 
4216 /*
4217  * Attempt to hand out a delegation.
4218  *
4219  * Note we don't support write delegations, and won't until the vfs has
4220  * proper support for them.
4221  */
4222 static void
4223 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4224 			struct nfs4_ol_stateid *stp)
4225 {
4226 	struct nfs4_delegation *dp;
4227 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4228 	struct nfs4_client *clp = stp->st_stid.sc_client;
4229 	int cb_up;
4230 	int status = 0;
4231 
4232 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4233 	open->op_recall = 0;
4234 	switch (open->op_claim_type) {
4235 		case NFS4_OPEN_CLAIM_PREVIOUS:
4236 			if (!cb_up)
4237 				open->op_recall = 1;
4238 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4239 				goto out_no_deleg;
4240 			break;
4241 		case NFS4_OPEN_CLAIM_NULL:
4242 		case NFS4_OPEN_CLAIM_FH:
4243 			/*
4244 			 * Let's not give out any delegations till everyone's
4245 			 * had the chance to reclaim theirs, *and* until
4246 			 * NLM locks have all been reclaimed:
4247 			 */
4248 			if (locks_in_grace(clp->net))
4249 				goto out_no_deleg;
4250 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4251 				goto out_no_deleg;
4252 			/*
4253 			 * Also, if the file was opened for write or
4254 			 * create, there's a good chance the client's
4255 			 * about to write to it, resulting in an
4256 			 * immediate recall (since we don't support
4257 			 * write delegations):
4258 			 */
4259 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4260 				goto out_no_deleg;
4261 			if (open->op_create == NFS4_OPEN_CREATE)
4262 				goto out_no_deleg;
4263 			break;
4264 		default:
4265 			goto out_no_deleg;
4266 	}
4267 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4268 	if (IS_ERR(dp))
4269 		goto out_no_deleg;
4270 
4271 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4272 
4273 	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4274 		STATEID_VAL(&dp->dl_stid.sc_stateid));
4275 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4276 	nfs4_put_stid(&dp->dl_stid);
4277 	return;
4278 out_no_deleg:
4279 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4280 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4281 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4282 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4283 		open->op_recall = 1;
4284 	}
4285 
4286 	/* 4.1 client asking for a delegation? */
4287 	if (open->op_deleg_want)
4288 		nfsd4_open_deleg_none_ext(open, status);
4289 	return;
4290 }
4291 
4292 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4293 					struct nfs4_delegation *dp)
4294 {
4295 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4296 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4297 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4298 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4299 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4300 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4301 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4302 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4303 	}
4304 	/* Otherwise the client must be confused wanting a delegation
4305 	 * it already has; in that case we don't return
4306 	 * NFS4_OPEN_DELEGATE_NONE_EXT or a reason.
4307 	 */
4308 }
4309 
4310 __be32
4311 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4312 {
4313 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
4314 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4315 	struct nfs4_file *fp = NULL;
4316 	struct nfs4_ol_stateid *stp = NULL;
4317 	struct nfs4_delegation *dp = NULL;
4318 	__be32 status;
4319 
4320 	/*
4321 	 * Lookup file; if found, lookup stateid and check open request,
4322 	 * and check for delegations in the process of being recalled.
4323 	 * If not found, create the nfs4_file struct
4324 	 */
4325 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4326 	if (fp != open->op_file) {
4327 		status = nfs4_check_deleg(cl, open, &dp);
4328 		if (status)
4329 			goto out;
4330 		spin_lock(&fp->fi_lock);
4331 		stp = nfsd4_find_existing_open(fp, open);
4332 		spin_unlock(&fp->fi_lock);
4333 	} else {
4334 		open->op_file = NULL;
4335 		status = nfserr_bad_stateid;
4336 		if (nfsd4_is_deleg_cur(open))
4337 			goto out;
4338 	}
4339 
4340 	/*
4341 	 * OPEN the file, or upgrade an existing OPEN.
4342 	 * If truncate fails, the OPEN fails.
4343 	 */
4344 	if (stp) {
4345 		/* Stateid was found, this is an OPEN upgrade */
4346 		mutex_lock(&stp->st_mutex);
4347 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4348 		if (status) {
4349 			mutex_unlock(&stp->st_mutex);
4350 			goto out;
4351 		}
4352 	} else {
4353 		/* stp is returned locked. */
4354 		stp = init_open_stateid(fp, open);
4355 		/* See if we lost the race to some other thread */
4356 		if (stp->st_access_bmap != 0) {
4357 			status = nfs4_upgrade_open(rqstp, fp, current_fh,
4358 						stp, open);
4359 			if (status) {
4360 				mutex_unlock(&stp->st_mutex);
4361 				goto out;
4362 			}
4363 			goto upgrade_out;
4364 		}
4365 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4366 		if (status) {
4367 			mutex_unlock(&stp->st_mutex);
4368 			release_open_stateid(stp);
4369 			goto out;
4370 		}
4371 
4372 		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4373 							open->op_odstate);
4374 		if (stp->st_clnt_odstate == open->op_odstate)
4375 			open->op_odstate = NULL;
4376 	}
4377 upgrade_out:
4378 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4379 	mutex_unlock(&stp->st_mutex);
4380 
4381 	if (nfsd4_has_session(&resp->cstate)) {
4382 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4383 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4384 			open->op_why_no_deleg = WND4_NOT_WANTED;
4385 			goto nodeleg;
4386 		}
4387 	}
4388 
4389 	/*
4390 	 * Attempt to hand out a delegation. No error return, because the
4391 	 * OPEN succeeds even if we fail.
4392 	 */
4393 	nfs4_open_delegation(current_fh, open, stp);
4394 nodeleg:
4395 	status = nfs_ok;
4396 
4397 	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4398 		STATEID_VAL(&stp->st_stid.sc_stateid));
4399 out:
4400 	/* 4.1 client trying to upgrade/downgrade delegation? */
4401 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4402 	    open->op_deleg_want)
4403 		nfsd4_deleg_xgrade_none_ext(open, dp);
4404 
4405 	if (fp)
4406 		put_nfs4_file(fp);
4407 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4408 		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4409 	/*
4410 	 * To finish the open response, we just need to set the rflags.
4411 	 */
4412 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4413 	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
4414 	    !nfsd4_has_session(&resp->cstate))
4415 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4416 	if (dp)
4417 		nfs4_put_stid(&dp->dl_stid);
4418 	if (stp)
4419 		nfs4_put_stid(&stp->st_stid);
4420 
4421 	return status;
4422 }
4423 
4424 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4425 			      struct nfsd4_open *open)
4426 {
4427 	if (open->op_openowner) {
4428 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4429 
4430 		nfsd4_cstate_assign_replay(cstate, so);
4431 		nfs4_put_stateowner(so);
4432 	}
4433 	if (open->op_file)
4434 		kmem_cache_free(file_slab, open->op_file);
4435 	if (open->op_stp)
4436 		nfs4_put_stid(&open->op_stp->st_stid);
4437 	if (open->op_odstate)
4438 		kmem_cache_free(odstate_slab, open->op_odstate);
4439 }
4440 
4441 __be32
4442 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4443 	    clientid_t *clid)
4444 {
4445 	struct nfs4_client *clp;
4446 	__be32 status;
4447 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4448 
4449 	dprintk("process_renew(%08x/%08x): starting\n",
4450 			clid->cl_boot, clid->cl_id);
4451 	status = lookup_clientid(clid, cstate, nn);
4452 	if (status)
4453 		goto out;
4454 	clp = cstate->clp;
4455 	status = nfserr_cb_path_down;
4456 	if (!list_empty(&clp->cl_delegations)
4457 			&& clp->cl_cb_state != NFSD4_CB_UP)
4458 		goto out;
4459 	status = nfs_ok;
4460 out:
4461 	return status;
4462 }
4463 
4464 void
4465 nfsd4_end_grace(struct nfsd_net *nn)
4466 {
4467 	/* do nothing if grace period already ended */
4468 	if (nn->grace_ended)
4469 		return;
4470 
4471 	dprintk("NFSD: end of grace period\n");
4472 	nn->grace_ended = true;
4473 	/*
4474 	 * If the server goes down again right now, an NFSv4
4475 	 * client will still be allowed to reclaim after it comes back up,
4476 	 * even if it hasn't yet had a chance to reclaim state this time.
4477 	 *
4478 	 */
4479 	nfsd4_record_grace_done(nn);
4480 	/*
4481 	 * At this point, NFSv4 clients can still reclaim.  But if the
4482 	 * server crashes, any that have not yet reclaimed will be out
4483 	 * of luck on the next boot.
4484 	 *
4485 	 * (NFSv4.1+ clients are considered to have reclaimed once they
4486 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
4487 	 * have reclaimed after their first OPEN.)
4488 	 */
4489 	locks_end_grace(&nn->nfsd4_manager);
4490 	/*
4491 	 * At this point, and once lockd and/or any other containers
4492 	 * exit their grace period, further reclaims will fail and
4493 	 * regular locking can resume.
4494 	 */
4495 }
4496 
4497 static time_t
4498 nfs4_laundromat(struct nfsd_net *nn)
4499 {
4500 	struct nfs4_client *clp;
4501 	struct nfs4_openowner *oo;
4502 	struct nfs4_delegation *dp;
4503 	struct nfs4_ol_stateid *stp;
4504 	struct list_head *pos, *next, reaplist;
4505 	time_t cutoff = get_seconds() - nn->nfsd4_lease;
4506 	time_t t, new_timeo = nn->nfsd4_lease;
4507 
4508 	dprintk("NFSD: laundromat service - starting\n");
4509 	nfsd4_end_grace(nn);
4510 	INIT_LIST_HEAD(&reaplist);
4511 	spin_lock(&nn->client_lock);
4512 	list_for_each_safe(pos, next, &nn->client_lru) {
4513 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4514 		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4515 			t = clp->cl_time - cutoff;
4516 			new_timeo = min(new_timeo, t);
4517 			break;
4518 		}
4519 		if (mark_client_expired_locked(clp)) {
4520 			dprintk("NFSD: client in use (clientid %08x)\n",
4521 				clp->cl_clientid.cl_id);
4522 			continue;
4523 		}
4524 		list_add(&clp->cl_lru, &reaplist);
4525 	}
4526 	spin_unlock(&nn->client_lock);
4527 	list_for_each_safe(pos, next, &reaplist) {
4528 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4529 		dprintk("NFSD: purging unused client (clientid %08x)\n",
4530 			clp->cl_clientid.cl_id);
4531 		list_del_init(&clp->cl_lru);
4532 		expire_client(clp);
4533 	}
4534 	spin_lock(&state_lock);
4535 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
4536 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4537 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4538 			t = dp->dl_time - cutoff;
4539 			new_timeo = min(new_timeo, t);
4540 			break;
4541 		}
4542 		WARN_ON(!unhash_delegation_locked(dp));
4543 		list_add(&dp->dl_recall_lru, &reaplist);
4544 	}
4545 	spin_unlock(&state_lock);
4546 	while (!list_empty(&reaplist)) {
4547 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
4548 					dl_recall_lru);
4549 		list_del_init(&dp->dl_recall_lru);
4550 		revoke_delegation(dp);
4551 	}
4552 
4553 	spin_lock(&nn->client_lock);
4554 	while (!list_empty(&nn->close_lru)) {
4555 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4556 					oo_close_lru);
4557 		if (time_after((unsigned long)oo->oo_time,
4558 			       (unsigned long)cutoff)) {
4559 			t = oo->oo_time - cutoff;
4560 			new_timeo = min(new_timeo, t);
4561 			break;
4562 		}
4563 		list_del_init(&oo->oo_close_lru);
4564 		stp = oo->oo_last_closed_stid;
4565 		oo->oo_last_closed_stid = NULL;
4566 		spin_unlock(&nn->client_lock);
4567 		nfs4_put_stid(&stp->st_stid);
4568 		spin_lock(&nn->client_lock);
4569 	}
4570 	spin_unlock(&nn->client_lock);
4571 
4572 	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4573 	return new_timeo;
4574 }
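/*
 * nfs4_laundromat() returns the number of seconds until the next client,
 * delegation or closed-openowner entry is due to expire (the minimum
 * remaining time found while scanning the LRU lists), clamped below by
 * NFSD_LAUNDROMAT_MINTIMEOUT; laundromat_main() uses that value to
 * re-queue itself on laundry_wq.
 */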
4575 
4576 static struct workqueue_struct *laundry_wq;
4577 static void laundromat_main(struct work_struct *);
4578 
4579 static void
4580 laundromat_main(struct work_struct *laundry)
4581 {
4582 	time_t t;
4583 	struct delayed_work *dwork = to_delayed_work(laundry);
4584 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4585 					   laundromat_work);
4586 
4587 	t = nfs4_laundromat(nn);
4588 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4589 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4590 }
4591 
4592 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4593 {
4594 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4595 		return nfserr_bad_stateid;
4596 	return nfs_ok;
4597 }
4598 
4599 static inline int
4600 access_permit_read(struct nfs4_ol_stateid *stp)
4601 {
4602 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4603 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4604 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4605 }
4606 
4607 static inline int
4608 access_permit_write(struct nfs4_ol_stateid *stp)
4609 {
4610 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4611 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4612 }
4613 
4614 static
4615 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4616 {
4617 	__be32 status = nfserr_openmode;
4618 
4619 	/* For lock stateids, we test the parent open, not the lock: */
4620 	if (stp->st_openstp)
4621 		stp = stp->st_openstp;
4622 	if ((flags & WR_STATE) && !access_permit_write(stp))
4623 		goto out;
4624 	if ((flags & RD_STATE) && !access_permit_read(stp))
4625 		goto out;
4626 	status = nfs_ok;
4627 out:
4628 	return status;
4629 }
4630 
4631 static inline __be32
4632 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4633 {
4634 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
4635 		return nfs_ok;
4636 	else if (opens_in_grace(net)) {
4637 		/* The answer in the remaining cases depends on the existence
4638 		 * of conflicting state, so we must wait out the grace period. */
4639 		return nfserr_grace;
4640 	} else if (flags & WR_STATE)
4641 		return nfs4_share_conflict(current_fh,
4642 				NFS4_SHARE_DENY_WRITE);
4643 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4644 		return nfs4_share_conflict(current_fh,
4645 				NFS4_SHARE_DENY_READ);
4646 }
4647 
4648 /*
4649  * Allow READ/WRITE during grace period on recovered state only for files
4650  * that are not able to provide mandatory locking.
4651  */
4652 static inline int
4653 grace_disallows_io(struct net *net, struct inode *inode)
4654 {
4655 	return opens_in_grace(net) && mandatory_lock(inode);
4656 }
4657 
4658 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4659 {
4660 	/*
4661 	 * When sessions are used the stateid generation number is ignored
4662 	 * when it is zero.
4663 	 */
4664 	if (has_session && in->si_generation == 0)
4665 		return nfs_ok;
4666 
4667 	if (in->si_generation == ref->si_generation)
4668 		return nfs_ok;
4669 
4670 	/* If the client sends us a stateid from the future, it's buggy: */
4671 	if (nfsd4_stateid_generation_after(in, ref))
4672 		return nfserr_bad_stateid;
4673 	/*
4674 	 * However, we could see a stateid from the past, even from a
4675 	 * non-buggy client.  For example, if the client sends a lock
4676 	 * while some IO is outstanding, the lock may bump si_generation
4677 	 * while the IO is still in flight.  The client could avoid that
4678 	 * situation by waiting for responses on all the IO requests,
4679 	 * but it may get better performance by retrying IO that receives
4680 	 * an old_stateid error, as long as requests are rarely reordered
4681 	 * in flight:
4682 	 */
4683 	return nfserr_old_stateid;
4684 }
4685 
4686 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4687 {
4688 	if (ols->st_stateowner->so_is_open_owner &&
4689 	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4690 		return nfserr_bad_stateid;
4691 	return nfs_ok;
4692 }
4693 
4694 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4695 {
4696 	struct nfs4_stid *s;
4697 	__be32 status = nfserr_bad_stateid;
4698 
4699 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4700 		return status;
4701 	/* Client debugging aid. */
4702 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4703 		char addr_str[INET6_ADDRSTRLEN];
4704 		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4705 				 sizeof(addr_str));
4706 		pr_warn_ratelimited("NFSD: client %s testing state ID "
4707 					"with incorrect client ID\n", addr_str);
4708 		return status;
4709 	}
4710 	spin_lock(&cl->cl_lock);
4711 	s = find_stateid_locked(cl, stateid);
4712 	if (!s)
4713 		goto out_unlock;
4714 	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
4715 	if (status)
4716 		goto out_unlock;
4717 	switch (s->sc_type) {
4718 	case NFS4_DELEG_STID:
4719 		status = nfs_ok;
4720 		break;
4721 	case NFS4_REVOKED_DELEG_STID:
4722 		status = nfserr_deleg_revoked;
4723 		break;
4724 	case NFS4_OPEN_STID:
4725 	case NFS4_LOCK_STID:
4726 		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4727 		break;
4728 	default:
4729 		printk("unknown stateid type %x\n", s->sc_type);
4730 		/* Fallthrough */
4731 	case NFS4_CLOSED_STID:
4732 	case NFS4_CLOSED_DELEG_STID:
4733 		status = nfserr_bad_stateid;
4734 	}
4735 out_unlock:
4736 	spin_unlock(&cl->cl_lock);
4737 	return status;
4738 }
4739 
4740 __be32
4741 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4742 		     stateid_t *stateid, unsigned char typemask,
4743 		     struct nfs4_stid **s, struct nfsd_net *nn)
4744 {
4745 	__be32 status;
4746 
4747 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4748 		return nfserr_bad_stateid;
4749 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4750 	if (status == nfserr_stale_clientid) {
4751 		if (cstate->session)
4752 			return nfserr_bad_stateid;
4753 		return nfserr_stale_stateid;
4754 	}
4755 	if (status)
4756 		return status;
4757 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
4758 	if (!*s)
4759 		return nfserr_bad_stateid;
4760 	return nfs_ok;
4761 }
4762 
4763 static struct file *
4764 nfs4_find_file(struct nfs4_stid *s, int flags)
4765 {
4766 	if (!s)
4767 		return NULL;
4768 
4769 	switch (s->sc_type) {
4770 	case NFS4_DELEG_STID:
4771 		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
4772 			return NULL;
4773 		return get_file(s->sc_file->fi_deleg_file);
4774 	case NFS4_OPEN_STID:
4775 	case NFS4_LOCK_STID:
4776 		if (flags & RD_STATE)
4777 			return find_readable_file(s->sc_file);
4778 		else
4779 			return find_writeable_file(s->sc_file);
4780 		break;
4781 	}
4782 
4783 	return NULL;
4784 }
4785 
4786 static __be32
4787 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
4788 {
4789 	__be32 status;
4790 
4791 	status = nfsd4_check_openowner_confirmed(ols);
4792 	if (status)
4793 		return status;
4794 	return nfs4_check_openmode(ols, flags);
4795 }
4796 
4797 static __be32
4798 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
4799 		struct file **filpp, bool *tmp_file, int flags)
4800 {
4801 	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
4802 	struct file *file;
4803 	__be32 status;
4804 
4805 	file = nfs4_find_file(s, flags);
4806 	if (file) {
4807 		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
4808 				acc | NFSD_MAY_OWNER_OVERRIDE);
4809 		if (status) {
4810 			fput(file);
4811 			return status;
4812 		}
4813 
4814 		*filpp = file;
4815 	} else {
4816 		status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
4817 		if (status)
4818 			return status;
4819 
4820 		if (tmp_file)
4821 			*tmp_file = true;
4822 	}
4823 
4824 	return 0;
4825 }
4826 
4827 /*
4828  * Checks for stateid operations
4829  */
4830 __be32
4831 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
4832 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
4833 		stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
4834 {
4835 	struct inode *ino = d_inode(fhp->fh_dentry);
4836 	struct net *net = SVC_NET(rqstp);
4837 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4838 	struct nfs4_stid *s = NULL;
4839 	__be32 status;
4840 
4841 	if (filpp)
4842 		*filpp = NULL;
4843 	if (tmp_file)
4844 		*tmp_file = false;
4845 
4846 	if (grace_disallows_io(net, ino))
4847 		return nfserr_grace;
4848 
4849 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
4850 		status = check_special_stateids(net, fhp, stateid, flags);
4851 		goto done;
4852 	}
4853 
4854 	status = nfsd4_lookup_stateid(cstate, stateid,
4855 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
4856 				&s, nn);
4857 	if (status)
4858 		return status;
4859 	status = check_stateid_generation(stateid, &s->sc_stateid,
4860 			nfsd4_has_session(cstate));
4861 	if (status)
4862 		goto out;
4863 
4864 	switch (s->sc_type) {
4865 	case NFS4_DELEG_STID:
4866 		status = nfs4_check_delegmode(delegstateid(s), flags);
4867 		break;
4868 	case NFS4_OPEN_STID:
4869 	case NFS4_LOCK_STID:
4870 		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
4871 		break;
4872 	default:
4873 		status = nfserr_bad_stateid;
4874 		break;
4875 	}
4876 	if (status)
4877 		goto out;
4878 	status = nfs4_check_fh(fhp, s);
4879 
4880 done:
4881 	if (!status && filpp)
4882 		status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
4883 out:
4884 	if (s)
4885 		nfs4_put_stid(s);
4886 	return status;
4887 }
4888 
4889 /*
4890  * Test if the stateid is valid
4891  */
4892 __be32
4893 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4894 		   struct nfsd4_test_stateid *test_stateid)
4895 {
4896 	struct nfsd4_test_stateid_id *stateid;
4897 	struct nfs4_client *cl = cstate->session->se_client;
4898 
4899 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4900 		stateid->ts_id_status =
4901 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4902 
4903 	return nfs_ok;
4904 }
4905 
4906 static __be32
4907 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
4908 {
4909 	struct nfs4_ol_stateid *stp = openlockstateid(s);
4910 	__be32 ret;
4911 
4912 	mutex_lock(&stp->st_mutex);
4913 
4914 	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4915 	if (ret)
4916 		goto out;
4917 
4918 	ret = nfserr_locks_held;
4919 	if (check_for_locks(stp->st_stid.sc_file,
4920 			    lockowner(stp->st_stateowner)))
4921 		goto out;
4922 
4923 	release_lock_stateid(stp);
4924 	ret = nfs_ok;
4925 
4926 out:
4927 	mutex_unlock(&stp->st_mutex);
4928 	nfs4_put_stid(s);
4929 	return ret;
4930 }
4931 
4932 __be32
4933 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4934 		   struct nfsd4_free_stateid *free_stateid)
4935 {
4936 	stateid_t *stateid = &free_stateid->fr_stateid;
4937 	struct nfs4_stid *s;
4938 	struct nfs4_delegation *dp;
4939 	struct nfs4_client *cl = cstate->session->se_client;
4940 	__be32 ret = nfserr_bad_stateid;
4941 
4942 	spin_lock(&cl->cl_lock);
4943 	s = find_stateid_locked(cl, stateid);
4944 	if (!s)
4945 		goto out_unlock;
4946 	switch (s->sc_type) {
4947 	case NFS4_DELEG_STID:
4948 		ret = nfserr_locks_held;
4949 		break;
4950 	case NFS4_OPEN_STID:
4951 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4952 		if (ret)
4953 			break;
4954 		ret = nfserr_locks_held;
4955 		break;
4956 	case NFS4_LOCK_STID:
4957 		atomic_inc(&s->sc_count);
4958 		spin_unlock(&cl->cl_lock);
4959 		ret = nfsd4_free_lock_stateid(stateid, s);
4960 		goto out;
4961 	case NFS4_REVOKED_DELEG_STID:
4962 		dp = delegstateid(s);
4963 		list_del_init(&dp->dl_recall_lru);
4964 		spin_unlock(&cl->cl_lock);
4965 		nfs4_put_stid(s);
4966 		ret = nfs_ok;
4967 		goto out;
4968 	/* Default falls through and returns nfserr_bad_stateid */
4969 	}
4970 out_unlock:
4971 	spin_unlock(&cl->cl_lock);
4972 out:
4973 	return ret;
4974 }
4975 
4976 static inline int
4977 setlkflg(int type)
4978 {
4979 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
4980 		RD_STATE : WR_STATE;
4981 }
4982 
4983 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
4984 {
4985 	struct svc_fh *current_fh = &cstate->current_fh;
4986 	struct nfs4_stateowner *sop = stp->st_stateowner;
4987 	__be32 status;
4988 
4989 	status = nfsd4_check_seqid(cstate, sop, seqid);
4990 	if (status)
4991 		return status;
4992 	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4993 		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4994 		/*
4995 		 * "Closed" stateid's exist *only* to return
4996 		 * nfserr_replay_me from the previous step, and
4997 		 * revoked delegations are kept only for free_stateid.
4998 		 */
4999 		return nfserr_bad_stateid;
5000 	mutex_lock(&stp->st_mutex);
5001 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5002 	if (status == nfs_ok)
5003 		status = nfs4_check_fh(current_fh, &stp->st_stid);
5004 	if (status != nfs_ok)
5005 		mutex_unlock(&stp->st_mutex);
5006 	return status;
5007 }
5008 
5009 /*
5010  * Checks for sequence id mutating operations.
5011  */
5012 static __be32
5013 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5014 			 stateid_t *stateid, char typemask,
5015 			 struct nfs4_ol_stateid **stpp,
5016 			 struct nfsd_net *nn)
5017 {
5018 	__be32 status;
5019 	struct nfs4_stid *s;
5020 	struct nfs4_ol_stateid *stp = NULL;
5021 
5022 	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5023 		seqid, STATEID_VAL(stateid));
5024 
5025 	*stpp = NULL;
5026 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5027 	if (status)
5028 		return status;
5029 	stp = openlockstateid(s);
5030 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5031 
5032 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5033 	if (!status)
5034 		*stpp = stp;
5035 	else
5036 		nfs4_put_stid(&stp->st_stid);
5037 	return status;
5038 }
5039 
5040 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5041 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5042 {
5043 	__be32 status;
5044 	struct nfs4_openowner *oo;
5045 	struct nfs4_ol_stateid *stp;
5046 
5047 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5048 						NFS4_OPEN_STID, &stp, nn);
5049 	if (status)
5050 		return status;
5051 	oo = openowner(stp->st_stateowner);
5052 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5053 		mutex_unlock(&stp->st_mutex);
5054 		nfs4_put_stid(&stp->st_stid);
5055 		return nfserr_bad_stateid;
5056 	}
5057 	*stpp = stp;
5058 	return nfs_ok;
5059 }
5060 
5061 __be32
5062 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5063 		   struct nfsd4_open_confirm *oc)
5064 {
5065 	__be32 status;
5066 	struct nfs4_openowner *oo;
5067 	struct nfs4_ol_stateid *stp;
5068 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5069 
5070 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5071 			cstate->current_fh.fh_dentry);
5072 
5073 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5074 	if (status)
5075 		return status;
5076 
5077 	status = nfs4_preprocess_seqid_op(cstate,
5078 					oc->oc_seqid, &oc->oc_req_stateid,
5079 					NFS4_OPEN_STID, &stp, nn);
5080 	if (status)
5081 		goto out;
5082 	oo = openowner(stp->st_stateowner);
5083 	status = nfserr_bad_stateid;
5084 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5085 		mutex_unlock(&stp->st_mutex);
5086 		goto put_stateid;
5087 	}
5088 	oo->oo_flags |= NFS4_OO_CONFIRMED;
5089 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5090 	mutex_unlock(&stp->st_mutex);
5091 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5092 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5093 
5094 	nfsd4_client_record_create(oo->oo_owner.so_client);
5095 	status = nfs_ok;
5096 put_stateid:
5097 	nfs4_put_stid(&stp->st_stid);
5098 out:
5099 	nfsd4_bump_seqid(cstate, status);
5100 	return status;
5101 }
5102 
5103 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5104 {
5105 	if (!test_access(access, stp))
5106 		return;
5107 	nfs4_file_put_access(stp->st_stid.sc_file, access);
5108 	clear_access(access, stp);
5109 }
5110 
5111 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5112 {
5113 	switch (to_access) {
5114 	case NFS4_SHARE_ACCESS_READ:
5115 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5116 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5117 		break;
5118 	case NFS4_SHARE_ACCESS_WRITE:
5119 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5120 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5121 		break;
5122 	case NFS4_SHARE_ACCESS_BOTH:
5123 		break;
5124 	default:
5125 		WARN_ON_ONCE(1);
5126 	}
5127 }
5128 
5129 __be32
5130 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5131 		     struct nfsd4_compound_state *cstate,
5132 		     struct nfsd4_open_downgrade *od)
5133 {
5134 	__be32 status;
5135 	struct nfs4_ol_stateid *stp;
5136 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5137 
5138 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5139 			cstate->current_fh.fh_dentry);
5140 
5141 	/* We don't yet support WANT bits: */
5142 	if (od->od_deleg_want)
5143 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5144 			od->od_deleg_want);
5145 
5146 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5147 					&od->od_stateid, &stp, nn);
5148 	if (status)
5149 		goto out;
5150 	status = nfserr_inval;
5151 	if (!test_access(od->od_share_access, stp)) {
5152 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5153 			stp->st_access_bmap, od->od_share_access);
5154 		goto put_stateid;
5155 	}
5156 	if (!test_deny(od->od_share_deny, stp)) {
5157 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5158 			stp->st_deny_bmap, od->od_share_deny);
5159 		goto put_stateid;
5160 	}
5161 	nfs4_stateid_downgrade(stp, od->od_share_access);
5162 	reset_union_bmap_deny(od->od_share_deny, stp);
5163 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5164 	status = nfs_ok;
5165 put_stateid:
5166 	mutex_unlock(&stp->st_mutex);
5167 	nfs4_put_stid(&stp->st_stid);
5168 out:
5169 	nfsd4_bump_seqid(cstate, status);
5170 	return status;
5171 }
5172 
5173 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5174 {
5175 	struct nfs4_client *clp = s->st_stid.sc_client;
5176 	bool unhashed;
5177 	LIST_HEAD(reaplist);
5178 
5179 	s->st_stid.sc_type = NFS4_CLOSED_STID;
5180 	spin_lock(&clp->cl_lock);
5181 	unhashed = unhash_open_stateid(s, &reaplist);
5182 
5183 	if (clp->cl_minorversion) {
5184 		if (unhashed)
5185 			put_ol_stateid_locked(s, &reaplist);
5186 		spin_unlock(&clp->cl_lock);
5187 		free_ol_stateid_reaplist(&reaplist);
5188 	} else {
5189 		spin_unlock(&clp->cl_lock);
5190 		free_ol_stateid_reaplist(&reaplist);
5191 		if (unhashed)
5192 			move_to_close_lru(s, clp->net);
5193 	}
5194 }
5195 
5196 /*
5197  * nfs4_unlock_state() called after encode
5198  */
5199 __be32
5200 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5201 	    struct nfsd4_close *close)
5202 {
5203 	__be32 status;
5204 	struct nfs4_ol_stateid *stp;
5205 	struct net *net = SVC_NET(rqstp);
5206 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5207 
5208 	dprintk("NFSD: nfsd4_close on file %pd\n",
5209 			cstate->current_fh.fh_dentry);
5210 
5211 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5212 					&close->cl_stateid,
5213 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
5214 					&stp, nn);
5215 	nfsd4_bump_seqid(cstate, status);
5216 	if (status)
5217 		goto out;
5218 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5219 	mutex_unlock(&stp->st_mutex);
5220 
5221 	nfsd4_close_open_stateid(stp);
5222 
5223 	/* put reference from nfs4_preprocess_seqid_op */
5224 	nfs4_put_stid(&stp->st_stid);
5225 out:
5226 	return status;
5227 }
5228 
5229 __be32
5230 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5231 		  struct nfsd4_delegreturn *dr)
5232 {
5233 	struct nfs4_delegation *dp;
5234 	stateid_t *stateid = &dr->dr_stateid;
5235 	struct nfs4_stid *s;
5236 	__be32 status;
5237 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5238 
5239 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5240 		return status;
5241 
5242 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5243 	if (status)
5244 		goto out;
5245 	dp = delegstateid(s);
5246 	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
5247 	if (status)
5248 		goto put_stateid;
5249 
5250 	destroy_delegation(dp);
5251 put_stateid:
5252 	nfs4_put_stid(&dp->dl_stid);
5253 out:
5254 	return status;
5255 }
5256 
5257 static inline u64
5258 end_offset(u64 start, u64 len)
5259 {
5260 	u64 end;
5261 
5262 	end = start + len;
5263 	return end >= start ? end : NFS4_MAX_UINT64;
5264 }
5265 
5266 /* last octet in a range */
5267 static inline u64
5268 last_byte_offset(u64 start, u64 len)
5269 {
5270 	u64 end;
5271 
5272 	WARN_ON_ONCE(!len);
5273 	end = start + len;
5274 	return end > start ? end - 1 : NFS4_MAX_UINT64;
5275 }
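/*
 * Both helpers above clamp on unsigned overflow.  For example (values
 * purely illustrative), end_offset(0xfffffffffffffffeULL, 4) would wrap
 * past U64_MAX, so NFS4_MAX_UINT64 is returned instead and the range is
 * treated as running to the maximum representable offset.
 */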
5276 
5277 /*
5278  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5279  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5280  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
5281  * locking, this prevents us from being completely protocol-compliant.  The
5282  * real solution to this problem is to start using unsigned file offsets in
5283  * the VFS, but this is a very deep change!
5284  */
5285 static inline void
5286 nfs4_transform_lock_offset(struct file_lock *lock)
5287 {
5288 	if (lock->fl_start < 0)
5289 		lock->fl_start = OFFSET_MAX;
5290 	if (lock->fl_end < 0)
5291 		lock->fl_end = OFFSET_MAX;
5292 }
5293 
5294 static fl_owner_t
5295 nfsd4_fl_get_owner(fl_owner_t owner)
5296 {
5297 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5298 
5299 	nfs4_get_stateowner(&lo->lo_owner);
5300 	return owner;
5301 }
5302 
5303 static void
5304 nfsd4_fl_put_owner(fl_owner_t owner)
5305 {
5306 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5307 
5308 	if (lo)
5309 		nfs4_put_stateowner(&lo->lo_owner);
5310 }
5311 
5312 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
5313 	.lm_get_owner = nfsd4_fl_get_owner,
5314 	.lm_put_owner = nfsd4_fl_put_owner,
5315 };
5316 
5317 static inline void
5318 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5319 {
5320 	struct nfs4_lockowner *lo;
5321 
5322 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5323 		lo = (struct nfs4_lockowner *) fl->fl_owner;
5324 		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5325 					lo->lo_owner.so_owner.len, GFP_KERNEL);
5326 		if (!deny->ld_owner.data)
5327 			/* We just don't care that much */
5328 			goto nevermind;
5329 		deny->ld_owner.len = lo->lo_owner.so_owner.len;
5330 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5331 	} else {
5332 nevermind:
5333 		deny->ld_owner.len = 0;
5334 		deny->ld_owner.data = NULL;
5335 		deny->ld_clientid.cl_boot = 0;
5336 		deny->ld_clientid.cl_id = 0;
5337 	}
5338 	deny->ld_start = fl->fl_start;
5339 	deny->ld_length = NFS4_MAX_UINT64;
5340 	if (fl->fl_end != NFS4_MAX_UINT64)
5341 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
5342 	deny->ld_type = NFS4_READ_LT;
5343 	if (fl->fl_type != F_RDLCK)
5344 		deny->ld_type = NFS4_WRITE_LT;
5345 }
5346 
5347 static struct nfs4_lockowner *
5348 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5349 {
5350 	unsigned int strhashval = ownerstr_hashval(owner);
5351 	struct nfs4_stateowner *so;
5352 
5353 	lockdep_assert_held(&clp->cl_lock);
5354 
5355 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5356 			    so_strhash) {
5357 		if (so->so_is_open_owner)
5358 			continue;
5359 		if (same_owner_str(so, owner))
5360 			return lockowner(nfs4_get_stateowner(so));
5361 	}
5362 	return NULL;
5363 }
5364 
5365 static struct nfs4_lockowner *
5366 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5367 {
5368 	struct nfs4_lockowner *lo;
5369 
5370 	spin_lock(&clp->cl_lock);
5371 	lo = find_lockowner_str_locked(clp, owner);
5372 	spin_unlock(&clp->cl_lock);
5373 	return lo;
5374 }
5375 
5376 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5377 {
5378 	unhash_lockowner_locked(lockowner(sop));
5379 }
5380 
5381 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5382 {
5383 	struct nfs4_lockowner *lo = lockowner(sop);
5384 
5385 	kmem_cache_free(lockowner_slab, lo);
5386 }
5387 
5388 static const struct nfs4_stateowner_operations lockowner_ops = {
5389 	.so_unhash =	nfs4_unhash_lockowner,
5390 	.so_free =	nfs4_free_lockowner,
5391 };
5392 
5393 /*
5394  * Alloc a lock owner structure.
5395  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
5396  * occurred.
5397  *
5398  * strhashval = ownerstr_hashval
5399  */
5400 static struct nfs4_lockowner *
5401 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5402 			   struct nfs4_ol_stateid *open_stp,
5403 			   struct nfsd4_lock *lock)
5404 {
5405 	struct nfs4_lockowner *lo, *ret;
5406 
5407 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5408 	if (!lo)
5409 		return NULL;
5410 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5411 	lo->lo_owner.so_is_open_owner = 0;
5412 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5413 	lo->lo_owner.so_ops = &lockowner_ops;
5414 	spin_lock(&clp->cl_lock);
5415 	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5416 	if (ret == NULL) {
5417 		list_add(&lo->lo_owner.so_strhash,
5418 			 &clp->cl_ownerstr_hashtbl[strhashval]);
5419 		ret = lo;
5420 	} else
5421 		nfs4_free_stateowner(&lo->lo_owner);
5422 
5423 	spin_unlock(&clp->cl_lock);
5424 	return ret;
5425 }
5426 
5427 static void
5428 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5429 		  struct nfs4_file *fp, struct inode *inode,
5430 		  struct nfs4_ol_stateid *open_stp)
5431 {
5432 	struct nfs4_client *clp = lo->lo_owner.so_client;
5433 
5434 	lockdep_assert_held(&clp->cl_lock);
5435 
5436 	atomic_inc(&stp->st_stid.sc_count);
5437 	stp->st_stid.sc_type = NFS4_LOCK_STID;
5438 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5439 	get_nfs4_file(fp);
5440 	stp->st_stid.sc_file = fp;
5441 	stp->st_stid.sc_free = nfs4_free_lock_stateid;
5442 	stp->st_access_bmap = 0;
5443 	stp->st_deny_bmap = open_stp->st_deny_bmap;
5444 	stp->st_openstp = open_stp;
5445 	mutex_init(&stp->st_mutex);
5446 	list_add(&stp->st_locks, &open_stp->st_locks);
5447 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5448 	spin_lock(&fp->fi_lock);
5449 	list_add(&stp->st_perfile, &fp->fi_stateids);
5450 	spin_unlock(&fp->fi_lock);
5451 }
5452 
5453 static struct nfs4_ol_stateid *
5454 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5455 {
5456 	struct nfs4_ol_stateid *lst;
5457 	struct nfs4_client *clp = lo->lo_owner.so_client;
5458 
5459 	lockdep_assert_held(&clp->cl_lock);
5460 
5461 	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5462 		if (lst->st_stid.sc_file == fp) {
5463 			atomic_inc(&lst->st_stid.sc_count);
5464 			return lst;
5465 		}
5466 	}
5467 	return NULL;
5468 }
5469 
5470 static struct nfs4_ol_stateid *
5471 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5472 			    struct inode *inode, struct nfs4_ol_stateid *ost,
5473 			    bool *new)
5474 {
5475 	struct nfs4_stid *ns = NULL;
5476 	struct nfs4_ol_stateid *lst;
5477 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5478 	struct nfs4_client *clp = oo->oo_owner.so_client;
5479 
5480 	spin_lock(&clp->cl_lock);
5481 	lst = find_lock_stateid(lo, fi);
5482 	if (lst == NULL) {
5483 		spin_unlock(&clp->cl_lock);
5484 		ns = nfs4_alloc_stid(clp, stateid_slab);
5485 		if (ns == NULL)
5486 			return NULL;
5487 
5488 		spin_lock(&clp->cl_lock);
5489 		lst = find_lock_stateid(lo, fi);
5490 		if (likely(!lst)) {
5491 			lst = openlockstateid(ns);
5492 			init_lock_stateid(lst, lo, fi, inode, ost);
5493 			ns = NULL;
5494 			*new = true;
5495 		}
5496 	}
5497 	spin_unlock(&clp->cl_lock);
5498 	if (ns)
5499 		nfs4_put_stid(ns);
5500 	return lst;
5501 }
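/*
 * find_or_create_lock_stateid() uses the same drop-lock/allocate/re-check
 * pattern as find_or_add_file(): cl_lock is released while
 * nfs4_alloc_stid() may sleep, then retaken and the search repeated, so a
 * lock stateid raced in by another thread is reused and the freshly
 * allocated one is put.
 */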
5502 
5503 static int
5504 check_lock_length(u64 offset, u64 length)
5505 {
5506 	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5507 		(length > ~offset)));
5508 }
5509 
5510 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5511 {
5512 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5513 
5514 	lockdep_assert_held(&fp->fi_lock);
5515 
5516 	if (test_access(access, lock_stp))
5517 		return;
5518 	__nfs4_file_get_access(fp, access);
5519 	set_access(access, lock_stp);
5520 }
5521 
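/*
 * Find or create the lockowner and lock stateid for a LOCK request carrying
 * a "new" lock owner.  On success the stateid is returned referenced and
 * with st_mutex held; if a concurrent FREE_STATEID unhashed it while we
 * slept on the mutex, it is dropped and the lookup is retried.
 */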
5522 static __be32
5523 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5524 			    struct nfs4_ol_stateid *ost,
5525 			    struct nfsd4_lock *lock,
5526 			    struct nfs4_ol_stateid **plst, bool *new)
5527 {
5528 	__be32 status;
5529 	struct nfs4_file *fi = ost->st_stid.sc_file;
5530 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5531 	struct nfs4_client *cl = oo->oo_owner.so_client;
5532 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5533 	struct nfs4_lockowner *lo;
5534 	struct nfs4_ol_stateid *lst;
5535 	unsigned int strhashval;
5536 	bool hashed;
5537 
5538 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
5539 	if (!lo) {
5540 		strhashval = ownerstr_hashval(&lock->lk_new_owner);
5541 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5542 		if (lo == NULL)
5543 			return nfserr_jukebox;
5544 	} else {
5545 		/* with an existing lockowner, seqids must be the same */
5546 		status = nfserr_bad_seqid;
5547 		if (!cstate->minorversion &&
5548 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5549 			goto out;
5550 	}
5551 
5552 retry:
5553 	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5554 	if (lst == NULL) {
5555 		status = nfserr_jukebox;
5556 		goto out;
5557 	}
5558 
5559 	mutex_lock(&lst->st_mutex);
5560 
5561 	/* See if it's still hashed to avoid race with FREE_STATEID */
5562 	spin_lock(&cl->cl_lock);
5563 	hashed = !list_empty(&lst->st_perfile);
5564 	spin_unlock(&cl->cl_lock);
5565 
5566 	if (!hashed) {
5567 		mutex_unlock(&lst->st_mutex);
5568 		nfs4_put_stid(&lst->st_stid);
5569 		goto retry;
5570 	}
5571 	status = nfs_ok;
5572 	*plst = lst;
5573 out:
5574 	nfs4_put_stateowner(&lo->lo_owner);
5575 	return status;
5576 }
5577 
5578 /*
5579  *  LOCK operation
5580  */
5581 __be32
5582 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5583 	   struct nfsd4_lock *lock)
5584 {
5585 	struct nfs4_openowner *open_sop = NULL;
5586 	struct nfs4_lockowner *lock_sop = NULL;
5587 	struct nfs4_ol_stateid *lock_stp = NULL;
5588 	struct nfs4_ol_stateid *open_stp = NULL;
5589 	struct nfs4_file *fp;
5590 	struct file *filp = NULL;
5591 	struct file_lock *file_lock = NULL;
5592 	struct file_lock *conflock = NULL;
5593 	__be32 status = 0;
5594 	int lkflg;
5595 	int err;
5596 	bool new = false;
5597 	struct net *net = SVC_NET(rqstp);
5598 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5599 
5600 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5601 		(long long) lock->lk_offset,
5602 		(long long) lock->lk_length);
5603 
5604 	if (check_lock_length(lock->lk_offset, lock->lk_length))
5605 		return nfserr_inval;
5606 
5607 	if ((status = fh_verify(rqstp, &cstate->current_fh,
5608 				S_IFREG, NFSD_MAY_LOCK))) {
5609 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
5610 		return status;
5611 	}
5612 
5613 	if (lock->lk_is_new) {
5614 		if (nfsd4_has_session(cstate))
5615 			/* See rfc 5661 18.10.3: given clientid is ignored: */
5616 			memcpy(&lock->lk_new_clientid,
5617 				&cstate->session->se_client->cl_clientid,
5618 				sizeof(clientid_t));
5619 
5620 		status = nfserr_stale_clientid;
5621 		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5622 			goto out;
5623 
5624 		/* validate and update open stateid and open seqid */
5625 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
5626 					lock->lk_new_open_seqid,
5627 					&lock->lk_new_open_stateid,
5628 					&open_stp, nn);
5629 		if (status)
5630 			goto out;
5631 		mutex_unlock(&open_stp->st_mutex);
5632 		open_sop = openowner(open_stp->st_stateowner);
5633 		status = nfserr_bad_stateid;
5634 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5635 						&lock->lk_new_clientid))
5636 			goto out;
5637 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
5638 							&lock_stp, &new);
5639 	} else {
5640 		status = nfs4_preprocess_seqid_op(cstate,
5641 				       lock->lk_old_lock_seqid,
5642 				       &lock->lk_old_lock_stateid,
5643 				       NFS4_LOCK_STID, &lock_stp, nn);
5644 	}
5645 	if (status)
5646 		goto out;
5647 	lock_sop = lockowner(lock_stp->st_stateowner);
5648 
5649 	lkflg = setlkflg(lock->lk_type);
5650 	status = nfs4_check_openmode(lock_stp, lkflg);
5651 	if (status)
5652 		goto out;
5653 
5654 	status = nfserr_grace;
5655 	if (locks_in_grace(net) && !lock->lk_reclaim)
5656 		goto out;
5657 	status = nfserr_no_grace;
5658 	if (!locks_in_grace(net) && lock->lk_reclaim)
5659 		goto out;
5660 
5661 	file_lock = locks_alloc_lock();
5662 	if (!file_lock) {
5663 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5664 		status = nfserr_jukebox;
5665 		goto out;
5666 	}
5667 
5668 	fp = lock_stp->st_stid.sc_file;
5669 	switch (lock->lk_type) {
5670 		case NFS4_READ_LT:
5671 		case NFS4_READW_LT:
5672 			spin_lock(&fp->fi_lock);
5673 			filp = find_readable_file_locked(fp);
5674 			if (filp)
5675 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5676 			spin_unlock(&fp->fi_lock);
5677 			file_lock->fl_type = F_RDLCK;
5678 			break;
5679 		case NFS4_WRITE_LT:
5680 		case NFS4_WRITEW_LT:
5681 			spin_lock(&fp->fi_lock);
5682 			filp = find_writeable_file_locked(fp);
5683 			if (filp)
5684 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5685 			spin_unlock(&fp->fi_lock);
5686 			file_lock->fl_type = F_WRLCK;
5687 			break;
5688 		default:
5689 			status = nfserr_inval;
5690 		goto out;
5691 	}
5692 	if (!filp) {
5693 		status = nfserr_openmode;
5694 		goto out;
5695 	}
5696 
5697 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
5698 	file_lock->fl_pid = current->tgid;
5699 	file_lock->fl_file = filp;
5700 	file_lock->fl_flags = FL_POSIX;
5701 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
5702 	file_lock->fl_start = lock->lk_offset;
5703 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
5704 	nfs4_transform_lock_offset(file_lock);
5705 
5706 	conflock = locks_alloc_lock();
5707 	if (!conflock) {
5708 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5709 		status = nfserr_jukebox;
5710 		goto out;
5711 	}
5712 
5713 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
5714 	switch (-err) {
5715 	case 0: /* success! */
5716 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
5717 		status = 0;
5718 		break;
5719 	case (EAGAIN):		/* conflock holds conflicting lock */
5720 		status = nfserr_denied;
5721 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5722 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
5723 		break;
5724 	case (EDEADLK):
5725 		status = nfserr_deadlock;
5726 		break;
5727 	default:
5728 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
5729 		status = nfserrno(err);
5730 		break;
5731 	}
5732 out:
5733 	if (filp)
5734 		fput(filp);
5735 	if (lock_stp) {
5736 		/* Bump seqid manually if the 4.0 replay owner is openowner */
5737 		/* Bump the lock seqid manually if the 4.0 replay owner is the openowner */
5738 		    cstate->replay_owner != &lock_sop->lo_owner &&
5739 		    seqid_mutating_err(ntohl(status)))
5740 			lock_sop->lo_owner.so_seqid++;
5741 
5742 		mutex_unlock(&lock_stp->st_mutex);
5743 
5744 		/*
5745 		 * If this is a new, never-before-used stateid, and we are
5746 		 * returning an error, then just go ahead and release it.
5747 		 */
5748 		if (status && new)
5749 			release_lock_stateid(lock_stp);
5750 
5751 		nfs4_put_stid(&lock_stp->st_stid);
5752 	}
5753 	if (open_stp)
5754 		nfs4_put_stid(&open_stp->st_stid);
5755 	nfsd4_bump_seqid(cstate, status);
5756 	if (file_lock)
5757 		locks_free_lock(file_lock);
5758 	if (conflock)
5759 		locks_free_lock(conflock);
5760 	return status;
5761 }
5762 
5763 /*
5764  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
5765  * so we do a temporary open here just to get an open file to pass to
5766  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
5767  * inode operation.)
5768  */
5769 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
5770 {
5771 	struct file *file;
5772 	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
5773 	if (!err) {
5774 		err = nfserrno(vfs_test_lock(file, lock));
5775 		fput(file);
5776 	}
5777 	return err;
5778 }
5779 
5780 /*
5781  * LOCKT operation
5782  */
5783 __be32
5784 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5785 	    struct nfsd4_lockt *lockt)
5786 {
5787 	struct file_lock *file_lock = NULL;
5788 	struct nfs4_lockowner *lo = NULL;
5789 	__be32 status;
5790 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5791 
5792 	if (locks_in_grace(SVC_NET(rqstp)))
5793 		return nfserr_grace;
5794 
5795 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
5796 		return nfserr_inval;
5797 
5798 	if (!nfsd4_has_session(cstate)) {
5799 		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
5800 		if (status)
5801 			goto out;
5802 	}
5803 
5804 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5805 		goto out;
5806 
5807 	file_lock = locks_alloc_lock();
5808 	if (!file_lock) {
5809 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5810 		status = nfserr_jukebox;
5811 		goto out;
5812 	}
5813 
5814 	switch (lockt->lt_type) {
5815 		case NFS4_READ_LT:
5816 		case NFS4_READW_LT:
5817 			file_lock->fl_type = F_RDLCK;
5818 		break;
5819 		case NFS4_WRITE_LT:
5820 		case NFS4_WRITEW_LT:
5821 			file_lock->fl_type = F_WRLCK;
5822 		break;
5823 		default:
5824 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
5825 			status = nfserr_inval;
5826 		goto out;
5827 	}
5828 
5829 	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
5830 	if (lo)
5831 		file_lock->fl_owner = (fl_owner_t)lo;
5832 	file_lock->fl_pid = current->tgid;
5833 	file_lock->fl_flags = FL_POSIX;
5834 
5835 	file_lock->fl_start = lockt->lt_offset;
5836 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
5837 
5838 	nfs4_transform_lock_offset(file_lock);
5839 
5840 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
5841 	if (status)
5842 		goto out;
5843 
5844 	if (file_lock->fl_type != F_UNLCK) {
5845 		status = nfserr_denied;
5846 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
5847 	}
5848 out:
5849 	if (lo)
5850 		nfs4_put_stateowner(&lo->lo_owner);
5851 	if (file_lock)
5852 		locks_free_lock(file_lock);
5853 	return status;
5854 }
5855 
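/*
 * LOCKU operation: release a byte-range lock by passing the range down to
 * the VFS as an F_UNLCK request on the lock stateid's file.
 */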
5856 __be32
5857 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5858 	    struct nfsd4_locku *locku)
5859 {
5860 	struct nfs4_ol_stateid *stp;
5861 	struct file *filp = NULL;
5862 	struct file_lock *file_lock = NULL;
5863 	__be32 status;
5864 	int err;
5865 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5866 
5867 	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
5868 		(long long) locku->lu_offset,
5869 		(long long) locku->lu_length);
5870 
5871 	if (check_lock_length(locku->lu_offset, locku->lu_length))
5872 		return nfserr_inval;
5873 
5874 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
5875 					&locku->lu_stateid, NFS4_LOCK_STID,
5876 					&stp, nn);
5877 	if (status)
5878 		goto out;
5879 	filp = find_any_file(stp->st_stid.sc_file);
5880 	if (!filp) {
5881 		status = nfserr_lock_range;
5882 		goto put_stateid;
5883 	}
5884 	file_lock = locks_alloc_lock();
5885 	if (!file_lock) {
5886 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5887 		status = nfserr_jukebox;
5888 		goto fput;
5889 	}
5890 
5891 	file_lock->fl_type = F_UNLCK;
5892 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
5893 	file_lock->fl_pid = current->tgid;
5894 	file_lock->fl_file = filp;
5895 	file_lock->fl_flags = FL_POSIX;
5896 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
5897 	file_lock->fl_start = locku->lu_offset;
5898 
5899 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
5900 						locku->lu_length);
5901 	nfs4_transform_lock_offset(file_lock);
5902 
5903 	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
5904 	if (err) {
5905 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
5906 		goto out_nfserr;
5907 	}
5908 	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
5909 fput:
5910 	fput(filp);
5911 put_stateid:
5912 	mutex_unlock(&stp->st_mutex);
5913 	nfs4_put_stid(&stp->st_stid);
5914 out:
5915 	nfsd4_bump_seqid(cstate, status);
5916 	if (file_lock)
5917 		locks_free_lock(file_lock);
5918 	return status;
5919 
5920 out_nfserr:
5921 	status = nfserrno(err);
5922 	goto fput;
5923 }
5924 
5925 /*
5926  * returns
5927  * 	true:  locks held by lockowner
5928  * 	false: no locks held by lockowner
5929  */
5930 static bool
5931 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5932 {
5933 	struct file_lock *fl;
5934 	bool status = false;
5935 	struct file *filp = find_any_file(fp);
5936 	struct inode *inode;
5937 	struct file_lock_context *flctx;
5938 
5939 	if (!filp) {
5940 		/* Any valid lock stateid should have some sort of access */
5941 		WARN_ON_ONCE(1);
5942 		return status;
5943 	}
5944 
5945 	inode = file_inode(filp);
5946 	flctx = inode->i_flctx;
5947 
5948 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
5949 		spin_lock(&flctx->flc_lock);
5950 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
5951 			if (fl->fl_owner == (fl_owner_t)lowner) {
5952 				status = true;
5953 				break;
5954 			}
5955 		}
5956 		spin_unlock(&flctx->flc_lock);
5957 	}
5958 	fput(filp);
5959 	return status;
5960 }
5961 
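/*
 * RELEASE_LOCKOWNER operation: if the named lockowner still holds locks on
 * any file, fail with nfserr_locks_held; otherwise unhash the lockowner and
 * free all of its lock stateids.
 */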
5962 __be32
5963 nfsd4_release_lockowner(struct svc_rqst *rqstp,
5964 			struct nfsd4_compound_state *cstate,
5965 			struct nfsd4_release_lockowner *rlockowner)
5966 {
5967 	clientid_t *clid = &rlockowner->rl_clientid;
5968 	struct nfs4_stateowner *sop;
5969 	struct nfs4_lockowner *lo = NULL;
5970 	struct nfs4_ol_stateid *stp;
5971 	struct xdr_netobj *owner = &rlockowner->rl_owner;
5972 	unsigned int hashval = ownerstr_hashval(owner);
5973 	__be32 status;
5974 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5975 	struct nfs4_client *clp;
5976 	LIST_HEAD(reaplist);
5977 
5978 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
5979 		clid->cl_boot, clid->cl_id);
5980 
5981 	status = lookup_clientid(clid, cstate, nn);
5982 	if (status)
5983 		return status;
5984 
5985 	clp = cstate->clp;
5986 	/* Find the matching lock stateowner */
5987 	spin_lock(&clp->cl_lock);
5988 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
5989 			    so_strhash) {
5990 
5991 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
5992 			continue;
5993 
5994 		/* see if there are still any locks associated with it */
5995 		lo = lockowner(sop);
5996 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
5997 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
5998 				status = nfserr_locks_held;
5999 				spin_unlock(&clp->cl_lock);
6000 				return status;
6001 			}
6002 		}
6003 
6004 		nfs4_get_stateowner(sop);
6005 		break;
6006 	}
6007 	if (!lo) {
6008 		spin_unlock(&clp->cl_lock);
6009 		return status;
6010 	}
6011 
6012 	unhash_lockowner_locked(lo);
6013 	while (!list_empty(&lo->lo_owner.so_stateids)) {
6014 		stp = list_first_entry(&lo->lo_owner.so_stateids,
6015 				       struct nfs4_ol_stateid,
6016 				       st_perstateowner);
6017 		WARN_ON(!unhash_lock_stateid(stp));
6018 		put_ol_stateid_locked(stp, &reaplist);
6019 	}
6020 	spin_unlock(&clp->cl_lock);
6021 	free_ol_stateid_reaplist(&reaplist);
6022 	nfs4_put_stateowner(&lo->lo_owner);
6023 
6024 	return status;
6025 }
6026 
6027 static inline struct nfs4_client_reclaim *
6028 alloc_reclaim(void)
6029 {
6030 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6031 }
6032 
6033 bool
6034 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6035 {
6036 	struct nfs4_client_reclaim *crp;
6037 
6038 	crp = nfsd4_find_reclaim_client(name, nn);
6039 	return (crp && crp->cr_clp);
6040 }
6041 
6042 /*
6043  * failure => all bets are off for reclaim across a server reset; the client will end up with nfserr_no_grace...
6044  */
6045 struct nfs4_client_reclaim *
6046 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6047 {
6048 	unsigned int strhashval;
6049 	struct nfs4_client_reclaim *crp;
6050 
6051 	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6052 	crp = alloc_reclaim();
6053 	if (crp) {
6054 		strhashval = clientstr_hashval(name);
6055 		INIT_LIST_HEAD(&crp->cr_strhash);
6056 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6057 		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6058 		crp->cr_clp = NULL;
6059 		nn->reclaim_str_hashtbl_size++;
6060 	}
6061 	return crp;
6062 }
6063 
6064 void
6065 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6066 {
6067 	list_del(&crp->cr_strhash);
6068 	kfree(crp);
6069 	nn->reclaim_str_hashtbl_size--;
6070 }
6071 
6072 void
6073 nfs4_release_reclaim(struct nfsd_net *nn)
6074 {
6075 	struct nfs4_client_reclaim *crp = NULL;
6076 	int i;
6077 
6078 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6079 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6080 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6081 			                struct nfs4_client_reclaim, cr_strhash);
6082 			nfs4_remove_reclaim_record(crp, nn);
6083 		}
6084 	}
6085 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6086 }
6087 
6088 /*
6089  * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
6090 struct nfs4_client_reclaim *
6091 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6092 {
6093 	unsigned int strhashval;
6094 	struct nfs4_client_reclaim *crp = NULL;
6095 
6096 	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6097 
6098 	strhashval = clientstr_hashval(recdir);
6099 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6100 		if (same_name(crp->cr_recdir, recdir)) {
6101 			return crp;
6102 		}
6103 	}
6104 	return NULL;
6105 }
6106 
6107 /*
6108  * Called from OPEN. Look for clientid in reclaim list.
6109  */
6110 __be32
6111 nfs4_check_open_reclaim(clientid_t *clid,
6112 		struct nfsd4_compound_state *cstate,
6113 		struct nfsd_net *nn)
6114 {
6115 	__be32 status;
6116 
6117 	/* find clientid in conf_id_hashtbl */
6118 	status = lookup_clientid(clid, cstate, nn);
6119 	if (status)
6120 		return nfserr_reclaim_bad;
6121 
6122 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6123 		return nfserr_no_grace;
6124 
6125 	if (nfsd4_client_record_check(cstate->clp))
6126 		return nfserr_reclaim_bad;
6127 
6128 	return nfs_ok;
6129 }
6130 
6131 #ifdef CONFIG_NFSD_FAULT_INJECTION
6132 static inline void
6133 put_client(struct nfs4_client *clp)
6134 {
6135 	atomic_dec(&clp->cl_refcount);
6136 }
6137 
6138 static struct nfs4_client *
6139 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6140 {
6141 	struct nfs4_client *clp;
6142 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6143 					  nfsd_net_id);
6144 
6145 	if (!nfsd_netns_ready(nn))
6146 		return NULL;
6147 
6148 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6149 		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6150 			return clp;
6151 	}
6152 	return NULL;
6153 }
6154 
6155 u64
6156 nfsd_inject_print_clients(void)
6157 {
6158 	struct nfs4_client *clp;
6159 	u64 count = 0;
6160 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6161 					  nfsd_net_id);
6162 	char buf[INET6_ADDRSTRLEN];
6163 
6164 	if (!nfsd_netns_ready(nn))
6165 		return 0;
6166 
6167 	spin_lock(&nn->client_lock);
6168 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6169 		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6170 		pr_info("NFS Client: %s\n", buf);
6171 		++count;
6172 	}
6173 	spin_unlock(&nn->client_lock);
6174 
6175 	return count;
6176 }
6177 
6178 u64
6179 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6180 {
6181 	u64 count = 0;
6182 	struct nfs4_client *clp;
6183 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6184 					  nfsd_net_id);
6185 
6186 	if (!nfsd_netns_ready(nn))
6187 		return count;
6188 
6189 	spin_lock(&nn->client_lock);
6190 	clp = nfsd_find_client(addr, addr_size);
6191 	if (clp) {
6192 		if (mark_client_expired_locked(clp) == nfs_ok)
6193 			++count;
6194 		else
6195 			clp = NULL;
6196 	}
6197 	spin_unlock(&nn->client_lock);
6198 
6199 	if (clp)
6200 		expire_client(clp);
6201 
6202 	return count;
6203 }
6204 
6205 u64
6206 nfsd_inject_forget_clients(u64 max)
6207 {
6208 	u64 count = 0;
6209 	struct nfs4_client *clp, *next;
6210 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6211 						nfsd_net_id);
6212 	LIST_HEAD(reaplist);
6213 
6214 	if (!nfsd_netns_ready(nn))
6215 		return count;
6216 
6217 	spin_lock(&nn->client_lock);
6218 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6219 		if (mark_client_expired_locked(clp) == nfs_ok) {
6220 			list_add(&clp->cl_lru, &reaplist);
6221 			if (max != 0 && ++count >= max)
6222 				break;
6223 		}
6224 	}
6225 	spin_unlock(&nn->client_lock);
6226 
6227 	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6228 		expire_client(clp);
6229 
6230 	return count;
6231 }
6232 
6233 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6234 			     const char *type)
6235 {
6236 	char buf[INET6_ADDRSTRLEN];
6237 	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6238 	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6239 }
6240 
6241 static void
6242 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6243 			     struct list_head *collect)
6244 {
6245 	struct nfs4_client *clp = lst->st_stid.sc_client;
6246 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6247 					  nfsd_net_id);
6248 
6249 	if (!collect)
6250 		return;
6251 
6252 	lockdep_assert_held(&nn->client_lock);
6253 	atomic_inc(&clp->cl_refcount);
6254 	list_add(&lst->st_locks, collect);
6255 }
6256 
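/*
 * Walk every lock stateid hanging off each of @clp's openowners, optionally
 * applying @func and collecting the stateids for which it returns true onto
 * @collect; stop once @max stateids have been visited (@max == 0 means no
 * limit).
 */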
6257 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6258 				    struct list_head *collect,
6259 				    bool (*func)(struct nfs4_ol_stateid *))
6260 {
6261 	struct nfs4_openowner *oop;
6262 	struct nfs4_ol_stateid *stp, *st_next;
6263 	struct nfs4_ol_stateid *lst, *lst_next;
6264 	u64 count = 0;
6265 
6266 	spin_lock(&clp->cl_lock);
6267 	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6268 		list_for_each_entry_safe(stp, st_next,
6269 				&oop->oo_owner.so_stateids, st_perstateowner) {
6270 			list_for_each_entry_safe(lst, lst_next,
6271 					&stp->st_locks, st_locks) {
6272 				if (func) {
6273 					if (func(lst))
6274 						nfsd_inject_add_lock_to_list(lst,
6275 									collect);
6276 				}
6277 				++count;
6278 				/*
6279 				 * Despite the fact that these functions deal
6280 				 * with 64-bit integers for "count", we must
6281 				 * ensure that it doesn't blow up the
6282 				 * clp->cl_refcount. Throw a warning if we
6283 				 * start to approach INT_MAX here.
6284 				 */
6285 				WARN_ON_ONCE(count == (INT_MAX / 2));
6286 				if (count == max)
6287 					goto out;
6288 			}
6289 		}
6290 	}
6291 out:
6292 	spin_unlock(&clp->cl_lock);
6293 
6294 	return count;
6295 }
6296 
6297 static u64
6298 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6299 			  u64 max)
6300 {
6301 	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6302 }
6303 
6304 static u64
6305 nfsd_print_client_locks(struct nfs4_client *clp)
6306 {
6307 	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6308 	nfsd_print_count(clp, count, "locked files");
6309 	return count;
6310 }
6311 
6312 u64
6313 nfsd_inject_print_locks(void)
6314 {
6315 	struct nfs4_client *clp;
6316 	u64 count = 0;
6317 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6318 						nfsd_net_id);
6319 
6320 	if (!nfsd_netns_ready(nn))
6321 		return 0;
6322 
6323 	spin_lock(&nn->client_lock);
6324 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6325 		count += nfsd_print_client_locks(clp);
6326 	spin_unlock(&nn->client_lock);
6327 
6328 	return count;
6329 }
6330 
6331 static void
6332 nfsd_reap_locks(struct list_head *reaplist)
6333 {
6334 	struct nfs4_client *clp;
6335 	struct nfs4_ol_stateid *stp, *next;
6336 
6337 	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6338 		list_del_init(&stp->st_locks);
6339 		clp = stp->st_stid.sc_client;
6340 		nfs4_put_stid(&stp->st_stid);
6341 		put_client(clp);
6342 	}
6343 }
6344 
6345 u64
6346 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6347 {
6348 	unsigned int count = 0;
6349 	struct nfs4_client *clp;
6350 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6351 						nfsd_net_id);
6352 	LIST_HEAD(reaplist);
6353 
6354 	if (!nfsd_netns_ready(nn))
6355 		return count;
6356 
6357 	spin_lock(&nn->client_lock);
6358 	clp = nfsd_find_client(addr, addr_size);
6359 	if (clp)
6360 		count = nfsd_collect_client_locks(clp, &reaplist, 0);
6361 	spin_unlock(&nn->client_lock);
6362 	nfsd_reap_locks(&reaplist);
6363 	return count;
6364 }
6365 
6366 u64
6367 nfsd_inject_forget_locks(u64 max)
6368 {
6369 	u64 count = 0;
6370 	struct nfs4_client *clp;
6371 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6372 						nfsd_net_id);
6373 	LIST_HEAD(reaplist);
6374 
6375 	if (!nfsd_netns_ready(nn))
6376 		return count;
6377 
6378 	spin_lock(&nn->client_lock);
6379 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6380 		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6381 		if (max != 0 && count >= max)
6382 			break;
6383 	}
6384 	spin_unlock(&nn->client_lock);
6385 	nfsd_reap_locks(&reaplist);
6386 	return count;
6387 }
6388 
6389 static u64
6390 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6391 			      struct list_head *collect,
6392 			      void (*func)(struct nfs4_openowner *))
6393 {
6394 	struct nfs4_openowner *oop, *next;
6395 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6396 						nfsd_net_id);
6397 	u64 count = 0;
6398 
6399 	lockdep_assert_held(&nn->client_lock);
6400 
6401 	spin_lock(&clp->cl_lock);
6402 	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6403 		if (func) {
6404 			func(oop);
6405 			if (collect) {
6406 				atomic_inc(&clp->cl_refcount);
6407 				list_add(&oop->oo_perclient, collect);
6408 			}
6409 		}
6410 		++count;
6411 		/*
6412 		 * Despite the fact that these functions deal with
6413 		 * 64-bit integers for "count", we must ensure that
6414 		 * it doesn't blow up the clp->cl_refcount. Throw a
6415 		 * warning if we start to approach INT_MAX here.
6416 		 */
6417 		WARN_ON_ONCE(count == (INT_MAX / 2));
6418 		if (count == max)
6419 			break;
6420 	}
6421 	spin_unlock(&clp->cl_lock);
6422 
6423 	return count;
6424 }
6425 
6426 static u64
6427 nfsd_print_client_openowners(struct nfs4_client *clp)
6428 {
6429 	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6430 
6431 	nfsd_print_count(clp, count, "openowners");
6432 	return count;
6433 }
6434 
6435 static u64
6436 nfsd_collect_client_openowners(struct nfs4_client *clp,
6437 			       struct list_head *collect, u64 max)
6438 {
6439 	return nfsd_foreach_client_openowner(clp, max, collect,
6440 						unhash_openowner_locked);
6441 }
6442 
6443 u64
6444 nfsd_inject_print_openowners(void)
6445 {
6446 	struct nfs4_client *clp;
6447 	u64 count = 0;
6448 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6449 						nfsd_net_id);
6450 
6451 	if (!nfsd_netns_ready(nn))
6452 		return 0;
6453 
6454 	spin_lock(&nn->client_lock);
6455 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6456 		count += nfsd_print_client_openowners(clp);
6457 	spin_unlock(&nn->client_lock);
6458 
6459 	return count;
6460 }
6461 
6462 static void
6463 nfsd_reap_openowners(struct list_head *reaplist)
6464 {
6465 	struct nfs4_client *clp;
6466 	struct nfs4_openowner *oop, *next;
6467 
6468 	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6469 		list_del_init(&oop->oo_perclient);
6470 		clp = oop->oo_owner.so_client;
6471 		release_openowner(oop);
6472 		put_client(clp);
6473 	}
6474 }
6475 
6476 u64
6477 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6478 				     size_t addr_size)
6479 {
6480 	unsigned int count = 0;
6481 	struct nfs4_client *clp;
6482 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6483 						nfsd_net_id);
6484 	LIST_HEAD(reaplist);
6485 
6486 	if (!nfsd_netns_ready(nn))
6487 		return count;
6488 
6489 	spin_lock(&nn->client_lock);
6490 	clp = nfsd_find_client(addr, addr_size);
6491 	if (clp)
6492 		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6493 	spin_unlock(&nn->client_lock);
6494 	nfsd_reap_openowners(&reaplist);
6495 	return count;
6496 }
6497 
6498 u64
6499 nfsd_inject_forget_openowners(u64 max)
6500 {
6501 	u64 count = 0;
6502 	struct nfs4_client *clp;
6503 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6504 						nfsd_net_id);
6505 	LIST_HEAD(reaplist);
6506 
6507 	if (!nfsd_netns_ready(nn))
6508 		return count;
6509 
6510 	spin_lock(&nn->client_lock);
6511 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6512 		count += nfsd_collect_client_openowners(clp, &reaplist,
6513 							max - count);
6514 		if (max != 0 && count >= max)
6515 			break;
6516 	}
6517 	spin_unlock(&nn->client_lock);
6518 	nfsd_reap_openowners(&reaplist);
6519 	return count;
6520 }
6521 
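/*
 * Count @clp's delegations.  If @victims is given, delegations that are
 * already being recalled (nonzero dl_time) are skipped; the rest are
 * unhashed and moved onto @victims, up to @max of them.
 */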
6522 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6523 				     struct list_head *victims)
6524 {
6525 	struct nfs4_delegation *dp, *next;
6526 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6527 						nfsd_net_id);
6528 	u64 count = 0;
6529 
6530 	lockdep_assert_held(&nn->client_lock);
6531 
6532 	spin_lock(&state_lock);
6533 	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6534 		if (victims) {
6535 			/*
6536 			 * It's not safe to mess with delegations that have a
6537 			 * non-zero dl_time. They might have already been broken
6538 			 * and could be processed by the laundromat outside of
6539 			 * the state_lock. Just leave them be.
6540 			 */
6541 			if (dp->dl_time != 0)
6542 				continue;
6543 
6544 			atomic_inc(&clp->cl_refcount);
6545 			WARN_ON(!unhash_delegation_locked(dp));
6546 			list_add(&dp->dl_recall_lru, victims);
6547 		}
6548 		++count;
6549 		/*
6550 		 * Despite the fact that these functions deal with
6551 		 * 64-bit integers for "count", we must ensure that
6552 		 * it doesn't blow up the clp->cl_refcount. Throw a
6553 		 * warning if we start to approach INT_MAX here.
6554 		 */
6555 		WARN_ON_ONCE(count == (INT_MAX / 2));
6556 		if (count == max)
6557 			break;
6558 	}
6559 	spin_unlock(&state_lock);
6560 	return count;
6561 }
6562 
6563 static u64
6564 nfsd_print_client_delegations(struct nfs4_client *clp)
6565 {
6566 	u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6567 
6568 	nfsd_print_count(clp, count, "delegations");
6569 	return count;
6570 }
6571 
6572 u64
6573 nfsd_inject_print_delegations(void)
6574 {
6575 	struct nfs4_client *clp;
6576 	u64 count = 0;
6577 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6578 						nfsd_net_id);
6579 
6580 	if (!nfsd_netns_ready(nn))
6581 		return 0;
6582 
6583 	spin_lock(&nn->client_lock);
6584 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6585 		count += nfsd_print_client_delegations(clp);
6586 	spin_unlock(&nn->client_lock);
6587 
6588 	return count;
6589 }
6590 
6591 static void
6592 nfsd_forget_delegations(struct list_head *reaplist)
6593 {
6594 	struct nfs4_client *clp;
6595 	struct nfs4_delegation *dp, *next;
6596 
6597 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6598 		list_del_init(&dp->dl_recall_lru);
6599 		clp = dp->dl_stid.sc_client;
6600 		revoke_delegation(dp);
6601 		put_client(clp);
6602 	}
6603 }
6604 
6605 u64
6606 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6607 				      size_t addr_size)
6608 {
6609 	u64 count = 0;
6610 	struct nfs4_client *clp;
6611 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6612 						nfsd_net_id);
6613 	LIST_HEAD(reaplist);
6614 
6615 	if (!nfsd_netns_ready(nn))
6616 		return count;
6617 
6618 	spin_lock(&nn->client_lock);
6619 	clp = nfsd_find_client(addr, addr_size);
6620 	if (clp)
6621 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6622 	spin_unlock(&nn->client_lock);
6623 
6624 	nfsd_forget_delegations(&reaplist);
6625 	return count;
6626 }
6627 
6628 u64
6629 nfsd_inject_forget_delegations(u64 max)
6630 {
6631 	u64 count = 0;
6632 	struct nfs4_client *clp;
6633 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6634 						nfsd_net_id);
6635 	LIST_HEAD(reaplist);
6636 
6637 	if (!nfsd_netns_ready(nn))
6638 		return count;
6639 
6640 	spin_lock(&nn->client_lock);
6641 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6642 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6643 		if (max != 0 && count >= max)
6644 			break;
6645 	}
6646 	spin_unlock(&nn->client_lock);
6647 	nfsd_forget_delegations(&reaplist);
6648 	return count;
6649 }
6650 
6651 static void
6652 nfsd_recall_delegations(struct list_head *reaplist)
6653 {
6654 	struct nfs4_client *clp;
6655 	struct nfs4_delegation *dp, *next;
6656 
6657 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6658 		list_del_init(&dp->dl_recall_lru);
6659 		clp = dp->dl_stid.sc_client;
6660 		/*
6661 		 * We skipped all entries that had a zero dl_time before,
6662 		 * so we can now reset the dl_time back to 0. If a delegation
6663 		 * break comes in now, then it won't make any difference since
6664 		 * we're recalling it either way.
6665 		 */
6666 		spin_lock(&state_lock);
6667 		dp->dl_time = 0;
6668 		spin_unlock(&state_lock);
6669 		nfsd_break_one_deleg(dp);
6670 		put_client(clp);
6671 	}
6672 }
6673 
6674 u64
6675 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
6676 				      size_t addr_size)
6677 {
6678 	u64 count = 0;
6679 	struct nfs4_client *clp;
6680 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6681 						nfsd_net_id);
6682 	LIST_HEAD(reaplist);
6683 
6684 	if (!nfsd_netns_ready(nn))
6685 		return count;
6686 
6687 	spin_lock(&nn->client_lock);
6688 	clp = nfsd_find_client(addr, addr_size);
6689 	if (clp)
6690 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6691 	spin_unlock(&nn->client_lock);
6692 
6693 	nfsd_recall_delegations(&reaplist);
6694 	return count;
6695 }
6696 
6697 u64
6698 nfsd_inject_recall_delegations(u64 max)
6699 {
6700 	u64 count = 0;
6701 	struct nfs4_client *clp, *next;
6702 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6703 						nfsd_net_id);
6704 	LIST_HEAD(reaplist);
6705 
6706 	if (!nfsd_netns_ready(nn))
6707 		return count;
6708 
6709 	spin_lock(&nn->client_lock);
6710 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6711 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6712 		if (max != 0 && count >= max)
6713 			break;
6714 	}
6715 	spin_unlock(&nn->client_lock);
6716 	nfsd_recall_delegations(&reaplist);
6717 	return count;
6718 }
6719 #endif /* CONFIG_NFSD_FAULT_INJECTION */
6720 
6721 /*
6722  * Since the lifetime of a delegation isn't limited to that of an open, a
6723  * client may quite reasonably hang on to a delegation as long as it has
6724  * the inode cached.  This becomes an obvious problem the first time a
6725  * client's inode cache approaches the size of the server's total memory.
6726  *
6727  * For now we avoid this problem by imposing a hard limit on the number
6728  * of delegations, which varies according to the server's memory size.
6729  */
6730 static void
6731 set_max_delegations(void)
6732 {
6733 	/*
6734 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
6735 	 * estimates suggest that in the worst case (where every delegation
6736 	 * is for a different inode), a delegation could take about 1.5K,
6737 	 * giving a worst case usage of about 0.6% of memory.
6738 	 */
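	/*
	 * For example, with 4K pages (PAGE_SHIFT == 12) the shift below is
	 * 20 - 2 - 12 = 6, i.e. one delegation per 64 free buffer pages
	 * (256K), which is the 4-per-megabyte figure quoted above.
	 */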
6739 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
6740 }
6741 
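/*
 * Allocate the per-network-namespace client and session hash tables and
 * initialize the per-net lists, locks and laundromat work.  Returns -ENOMEM
 * if any of the allocations fail.
 */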
6742 static int nfs4_state_create_net(struct net *net)
6743 {
6744 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6745 	int i;
6746 
6747 	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6748 			CLIENT_HASH_SIZE, GFP_KERNEL);
6749 	if (!nn->conf_id_hashtbl)
6750 		goto err;
6751 	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6752 			CLIENT_HASH_SIZE, GFP_KERNEL);
6753 	if (!nn->unconf_id_hashtbl)
6754 		goto err_unconf_id;
6755 	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
6756 			SESSION_HASH_SIZE, GFP_KERNEL);
6757 	if (!nn->sessionid_hashtbl)
6758 		goto err_sessionid;
6759 
6760 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6761 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
6762 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
6763 	}
6764 	for (i = 0; i < SESSION_HASH_SIZE; i++)
6765 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
6766 	nn->conf_name_tree = RB_ROOT;
6767 	nn->unconf_name_tree = RB_ROOT;
6768 	INIT_LIST_HEAD(&nn->client_lru);
6769 	INIT_LIST_HEAD(&nn->close_lru);
6770 	INIT_LIST_HEAD(&nn->del_recall_lru);
6771 	spin_lock_init(&nn->client_lock);
6772 
6773 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6774 	get_net(net);
6775 
6776 	return 0;
6777 
6778 err_sessionid:
6779 	kfree(nn->unconf_id_hashtbl);
6780 err_unconf_id:
6781 	kfree(nn->conf_id_hashtbl);
6782 err:
6783 	return -ENOMEM;
6784 }
6785 
6786 static void
6787 nfs4_state_destroy_net(struct net *net)
6788 {
6789 	int i;
6790 	struct nfs4_client *clp = NULL;
6791 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6792 
6793 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6794 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
6795 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6796 			destroy_client(clp);
6797 		}
6798 	}
6799 
6800 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6801 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
6802 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6803 			destroy_client(clp);
6804 		}
6805 	}
6806 
6807 	kfree(nn->sessionid_hashtbl);
6808 	kfree(nn->unconf_id_hashtbl);
6809 	kfree(nn->conf_id_hashtbl);
6810 	put_net(net);
6811 }
6812 
6813 int
6814 nfs4_state_start_net(struct net *net)
6815 {
6816 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6817 	int ret;
6818 
6819 	ret = nfs4_state_create_net(net);
6820 	if (ret)
6821 		return ret;
6822 	nn->boot_time = get_seconds();
6823 	nn->grace_ended = false;
6824 	nn->nfsd4_manager.block_opens = true;
6825 	locks_start_grace(net, &nn->nfsd4_manager);
6826 	nfsd4_client_tracking_init(net);
6827 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
6828 	       nn->nfsd4_grace, net);
6829 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
6830 	return 0;
6831 }
6832 
6833 /* initialization to perform when the nfsd service is started: */
6834 
6835 int
6836 nfs4_state_start(void)
6837 {
6838 	int ret;
6839 
6840 	ret = set_callback_cred();
6841 	if (ret)
6842 		return -ENOMEM;
6843 	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
6844 	if (laundry_wq == NULL) {
6845 		ret = -ENOMEM;
6846 		goto out_recovery;
6847 	}
6848 	ret = nfsd4_create_callback_queue();
6849 	if (ret)
6850 		goto out_free_laundry;
6851 
6852 	set_max_delegations();
6853 
6854 	return 0;
6855 
6856 out_free_laundry:
6857 	destroy_workqueue(laundry_wq);
6858 out_recovery:
6859 	return ret;
6860 }
6861 
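/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash and
 * release every delegation still on the del_recall_lru, then tear down
 * client tracking and the per-net state tables.
 */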
6862 void
6863 nfs4_state_shutdown_net(struct net *net)
6864 {
6865 	struct nfs4_delegation *dp = NULL;
6866 	struct list_head *pos, *next, reaplist;
6867 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6868 
6869 	cancel_delayed_work_sync(&nn->laundromat_work);
6870 	locks_end_grace(&nn->nfsd4_manager);
6871 
6872 	INIT_LIST_HEAD(&reaplist);
6873 	spin_lock(&state_lock);
6874 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
6875 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6876 		WARN_ON(!unhash_delegation_locked(dp));
6877 		list_add(&dp->dl_recall_lru, &reaplist);
6878 	}
6879 	spin_unlock(&state_lock);
6880 	list_for_each_safe(pos, next, &reaplist) {
6881 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6882 		list_del_init(&dp->dl_recall_lru);
6883 		put_clnt_odstate(dp->dl_clnt_odstate);
6884 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6885 		nfs4_put_stid(&dp->dl_stid);
6886 	}
6887 
6888 	nfsd4_client_tracking_exit(net);
6889 	nfs4_state_destroy_net(net);
6890 }
6891 
6892 void
6893 nfs4_state_shutdown(void)
6894 {
6895 	destroy_workqueue(laundry_wq);
6896 	nfsd4_destroy_callback_queue();
6897 }
6898 
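/*
 * If this compound has recorded a current stateid and the operation carries
 * the special "current stateid" value, substitute the recorded stateid.
 */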
6899 static void
6900 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6901 {
6902 	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
6903 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
6904 }
6905 
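/*
 * Record the stateid produced by an operation as the compound's current
 * stateid (minor versions >= 1 only).
 */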
6906 static void
6907 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6908 {
6909 	if (cstate->minorversion) {
6910 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
6911 		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6912 	}
6913 }
6914 
6915 void
6916 clear_current_stateid(struct nfsd4_compound_state *cstate)
6917 {
6918 	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6919 }
6920 
6921 /*
6922  * functions to set current state id
6923  */
6924 void
6925 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6926 {
6927 	put_stateid(cstate, &odp->od_stateid);
6928 }
6929 
6930 void
6931 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
6932 {
6933 	put_stateid(cstate, &open->op_stateid);
6934 }
6935 
6936 void
6937 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6938 {
6939 	put_stateid(cstate, &close->cl_stateid);
6940 }
6941 
6942 void
6943 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
6944 {
6945 	put_stateid(cstate, &lock->lk_resp_stateid);
6946 }
6947 
6948 /*
6949  * functions to consume current state id
6950  */
6951 
6952 void
6953 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6954 {
6955 	get_stateid(cstate, &odp->od_stateid);
6956 }
6957 
6958 void
6959 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
6960 {
6961 	get_stateid(cstate, &drp->dr_stateid);
6962 }
6963 
6964 void
6965 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
6966 {
6967 	get_stateid(cstate, &fsp->fr_stateid);
6968 }
6969 
6970 void
6971 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
6972 {
6973 	get_stateid(cstate, &setattr->sa_stateid);
6974 }
6975 
6976 void
6977 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6978 {
6979 	get_stateid(cstate, &close->cl_stateid);
6980 }
6981 
6982 void
6983 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
6984 {
6985 	get_stateid(cstate, &locku->lu_stateid);
6986 }
6987 
6988 void
6989 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
6990 {
6991 	get_stateid(cstate, &read->rd_stateid);
6992 }
6993 
6994 void
6995 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
6996 {
6997 	get_stateid(cstate, &write->wr_stateid);
6998 }
6999