xref: /openbmc/linux/fs/nfsd/nfs4state.c (revision 275876e2)
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/hash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49 
50 #include "netns.h"
51 
52 #define NFSDDBG_FACILITY                NFSDDBG_PROC
53 
54 #define all_ones {{~0,~0},~0}
55 static const stateid_t one_stateid = {
56 	.si_generation = ~0,
57 	.si_opaque = all_ones,
58 };
59 static const stateid_t zero_stateid = {
60 	/* all fields zero */
61 };
62 static const stateid_t currentstateid = {
63 	.si_generation = 1,
64 };
65 
66 static u64 current_sessionid = 1;
67 
68 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
69 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
70 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
71 
72 /* forward declarations */
73 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
74 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
75 
76 /* Locking: */
77 
78 /*
79  * Currently used for the del_recall_lru and file hash table.  In an
80  * effort to decrease the scope of the client_mutex, this spinlock may
81  * eventually cover more:
82  */
83 static DEFINE_SPINLOCK(state_lock);
84 
85 /*
86  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
87  * the refcount on the open stateid to drop.
88  */
89 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
90 
91 static struct kmem_cache *openowner_slab;
92 static struct kmem_cache *lockowner_slab;
93 static struct kmem_cache *file_slab;
94 static struct kmem_cache *stateid_slab;
95 static struct kmem_cache *deleg_slab;
96 
97 static void free_session(struct nfsd4_session *);
98 
99 static bool is_session_dead(struct nfsd4_session *ses)
100 {
101 	return ses->se_flags & NFS4_SESSION_DEAD;
102 }
103 
104 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
105 {
106 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
107 		return nfserr_jukebox;
108 	ses->se_flags |= NFS4_SESSION_DEAD;
109 	return nfs_ok;
110 }
111 
112 static bool is_client_expired(struct nfs4_client *clp)
113 {
114 	return clp->cl_time == 0;
115 }
116 
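/*
 * Take a reference on a client, failing with nfserr_expired if it has
 * already been expired.  Caller must hold nn->client_lock.
 */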
117 static __be32 get_client_locked(struct nfs4_client *clp)
118 {
119 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
120 
121 	lockdep_assert_held(&nn->client_lock);
122 
123 	if (is_client_expired(clp))
124 		return nfserr_expired;
125 	atomic_inc(&clp->cl_refcount);
126 	return nfs_ok;
127 }
128 
129 /* must be called under the client_lock */
130 static inline void
131 renew_client_locked(struct nfs4_client *clp)
132 {
133 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
134 
135 	if (is_client_expired(clp)) {
136 		WARN_ON(1);
137 		printk("%s: client (clientid %08x/%08x) already expired\n",
138 			__func__,
139 			clp->cl_clientid.cl_boot,
140 			clp->cl_clientid.cl_id);
141 		return;
142 	}
143 
144 	dprintk("renewing client (clientid %08x/%08x)\n",
145 			clp->cl_clientid.cl_boot,
146 			clp->cl_clientid.cl_id);
147 	list_move_tail(&clp->cl_lru, &nn->client_lru);
148 	clp->cl_time = get_seconds();
149 }
150 
151 static inline void
152 renew_client(struct nfs4_client *clp)
153 {
154 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
155 
156 	spin_lock(&nn->client_lock);
157 	renew_client_locked(clp);
158 	spin_unlock(&nn->client_lock);
159 }
160 
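/*
 * Drop a reference on the client; if it was the last one and the client
 * has not been expired, refresh its lease.  Caller must hold
 * nn->client_lock.
 */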
161 static void put_client_renew_locked(struct nfs4_client *clp)
162 {
163 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
164 
165 	lockdep_assert_held(&nn->client_lock);
166 
167 	if (!atomic_dec_and_test(&clp->cl_refcount))
168 		return;
169 	if (!is_client_expired(clp))
170 		renew_client_locked(clp);
171 }
172 
173 static void put_client_renew(struct nfs4_client *clp)
174 {
175 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
176 
177 	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
178 		return;
179 	if (!is_client_expired(clp))
180 		renew_client_locked(clp);
181 	spin_unlock(&nn->client_lock);
182 }
183 
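/*
 * Take references on both the session and its client, failing if the
 * session has been marked dead or the client has expired.  Caller must
 * hold nn->client_lock.
 */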
184 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
185 {
186 	__be32 status;
187 
188 	if (is_session_dead(ses))
189 		return nfserr_badsession;
190 	status = get_client_locked(ses->se_client);
191 	if (status)
192 		return status;
193 	atomic_inc(&ses->se_ref);
194 	return nfs_ok;
195 }
196 
197 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
198 {
199 	struct nfs4_client *clp = ses->se_client;
200 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
201 
202 	lockdep_assert_held(&nn->client_lock);
203 
204 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
205 		free_session(ses);
206 	put_client_renew_locked(clp);
207 }
208 
209 static void nfsd4_put_session(struct nfsd4_session *ses)
210 {
211 	struct nfs4_client *clp = ses->se_client;
212 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
213 
214 	spin_lock(&nn->client_lock);
215 	nfsd4_put_session_locked(ses);
216 	spin_unlock(&nn->client_lock);
217 }
218 
219 static int
220 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
221 {
222 	return (sop->so_owner.len == owner->len) &&
223 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
224 }
225 
226 static struct nfs4_openowner *
227 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
228 			struct nfs4_client *clp)
229 {
230 	struct nfs4_stateowner *so;
231 
232 	lockdep_assert_held(&clp->cl_lock);
233 
234 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
235 			    so_strhash) {
236 		if (!so->so_is_open_owner)
237 			continue;
238 		if (same_owner_str(so, &open->op_owner)) {
239 			atomic_inc(&so->so_count);
240 			return openowner(so);
241 		}
242 	}
243 	return NULL;
244 }
245 
246 static struct nfs4_openowner *
247 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
248 			struct nfs4_client *clp)
249 {
250 	struct nfs4_openowner *oo;
251 
252 	spin_lock(&clp->cl_lock);
253 	oo = find_openstateowner_str_locked(hashval, open, clp);
254 	spin_unlock(&clp->cl_lock);
255 	return oo;
256 }
257 
258 static inline u32
259 opaque_hashval(const void *ptr, int nbytes)
260 {
261 	unsigned char *cptr = (unsigned char *) ptr;
262 
263 	u32 x = 0;
264 	while (nbytes--) {
265 		x *= 37;
266 		x += *cptr++;
267 	}
268 	return x;
269 }
270 
271 static void nfsd4_free_file(struct nfs4_file *f)
272 {
273 	kmem_cache_free(file_slab, f);
274 }
275 
276 static inline void
277 put_nfs4_file(struct nfs4_file *fi)
278 {
279 	might_lock(&state_lock);
280 
281 	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
282 		hlist_del(&fi->fi_hash);
283 		spin_unlock(&state_lock);
284 		nfsd4_free_file(fi);
285 	}
286 }
287 
288 static inline void
289 get_nfs4_file(struct nfs4_file *fi)
290 {
291 	atomic_inc(&fi->fi_ref);
292 }
293 
294 static struct file *
295 __nfs4_get_fd(struct nfs4_file *f, int oflag)
296 {
297 	if (f->fi_fds[oflag])
298 		return get_file(f->fi_fds[oflag]);
299 	return NULL;
300 }
301 
302 static struct file *
303 find_writeable_file_locked(struct nfs4_file *f)
304 {
305 	struct file *ret;
306 
307 	lockdep_assert_held(&f->fi_lock);
308 
309 	ret = __nfs4_get_fd(f, O_WRONLY);
310 	if (!ret)
311 		ret = __nfs4_get_fd(f, O_RDWR);
312 	return ret;
313 }
314 
315 static struct file *
316 find_writeable_file(struct nfs4_file *f)
317 {
318 	struct file *ret;
319 
320 	spin_lock(&f->fi_lock);
321 	ret = find_writeable_file_locked(f);
322 	spin_unlock(&f->fi_lock);
323 
324 	return ret;
325 }
326 
327 static struct file *find_readable_file_locked(struct nfs4_file *f)
328 {
329 	struct file *ret;
330 
331 	lockdep_assert_held(&f->fi_lock);
332 
333 	ret = __nfs4_get_fd(f, O_RDONLY);
334 	if (!ret)
335 		ret = __nfs4_get_fd(f, O_RDWR);
336 	return ret;
337 }
338 
339 static struct file *
340 find_readable_file(struct nfs4_file *f)
341 {
342 	struct file *ret;
343 
344 	spin_lock(&f->fi_lock);
345 	ret = find_readable_file_locked(f);
346 	spin_unlock(&f->fi_lock);
347 
348 	return ret;
349 }
350 
351 static struct file *
352 find_any_file(struct nfs4_file *f)
353 {
354 	struct file *ret;
355 
356 	spin_lock(&f->fi_lock);
357 	ret = __nfs4_get_fd(f, O_RDWR);
358 	if (!ret) {
359 		ret = __nfs4_get_fd(f, O_WRONLY);
360 		if (!ret)
361 			ret = __nfs4_get_fd(f, O_RDONLY);
362 	}
363 	spin_unlock(&f->fi_lock);
364 	return ret;
365 }
366 
367 static atomic_long_t num_delegations;
368 unsigned long max_delegations;
369 
370 /*
371  * Open owner state (share locks)
372  */
373 
374 /* hash tables for lock and open owners */
375 #define OWNER_HASH_BITS              8
376 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
377 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
378 
379 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
380 {
381 	unsigned int ret;
382 
383 	ret = opaque_hashval(ownername->data, ownername->len);
384 	return ret & OWNER_HASH_MASK;
385 }
386 
387 /* hash table for nfs4_file */
388 #define FILE_HASH_BITS                   8
389 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
390 
391 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
392 {
393 	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
394 }
395 
396 static unsigned int file_hashval(struct knfsd_fh *fh)
397 {
398 	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
399 }
400 
401 static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
402 {
403 	return fh1->fh_size == fh2->fh_size &&
404 		!memcmp(fh1->fh_base.fh_pad,
405 				fh2->fh_base.fh_pad,
406 				fh1->fh_size);
407 }
408 
409 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
410 
411 static void
412 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
413 {
414 	lockdep_assert_held(&fp->fi_lock);
415 
416 	if (access & NFS4_SHARE_ACCESS_WRITE)
417 		atomic_inc(&fp->fi_access[O_WRONLY]);
418 	if (access & NFS4_SHARE_ACCESS_READ)
419 		atomic_inc(&fp->fi_access[O_RDONLY]);
420 }
421 
422 static __be32
423 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
424 {
425 	lockdep_assert_held(&fp->fi_lock);
426 
427 	/* Does this access mode make sense? */
428 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
429 		return nfserr_inval;
430 
431 	/* Does it conflict with a deny mode already set? */
432 	if ((access & fp->fi_share_deny) != 0)
433 		return nfserr_share_denied;
434 
435 	__nfs4_file_get_access(fp, access);
436 	return nfs_ok;
437 }
438 
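/*
 * Check whether the requested deny mode conflicts with access modes that
 * have already been granted on this file.
 */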
439 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
440 {
441 	/* Common case is that there is no deny mode. */
442 	if (deny) {
443 		/* Does this deny mode make sense? */
444 		if (deny & ~NFS4_SHARE_DENY_BOTH)
445 			return nfserr_inval;
446 
447 		if ((deny & NFS4_SHARE_DENY_READ) &&
448 		    atomic_read(&fp->fi_access[O_RDONLY]))
449 			return nfserr_share_denied;
450 
451 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
452 		    atomic_read(&fp->fi_access[O_WRONLY]))
453 			return nfserr_share_denied;
454 	}
455 	return nfs_ok;
456 }
457 
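/*
 * Drop one reference on the given access mode.  When the last reference
 * for that mode is put, release (fput) the corresponding struct file; the
 * O_RDWR file is also released once neither read nor write access remains.
 */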
458 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
459 {
460 	might_lock(&fp->fi_lock);
461 
462 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
463 		struct file *f1 = NULL;
464 		struct file *f2 = NULL;
465 
466 		swap(f1, fp->fi_fds[oflag]);
467 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
468 			swap(f2, fp->fi_fds[O_RDWR]);
469 		spin_unlock(&fp->fi_lock);
470 		if (f1)
471 			fput(f1);
472 		if (f2)
473 			fput(f2);
474 	}
475 }
476 
477 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
478 {
479 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
480 
481 	if (access & NFS4_SHARE_ACCESS_WRITE)
482 		__nfs4_file_put_access(fp, O_WRONLY);
483 	if (access & NFS4_SHARE_ACCESS_READ)
484 		__nfs4_file_put_access(fp, O_RDONLY);
485 }
486 
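/*
 * Allocate a stateid of the given type and install it in the client's
 * cl_stateids idr.  A cyclic id allocator is used so that stateid values
 * are reused as late as possible; the new stateid starts with
 * sc_count == 1.
 */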
487 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
488 					 struct kmem_cache *slab)
489 {
490 	struct nfs4_stid *stid;
491 	int new_id;
492 
493 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
494 	if (!stid)
495 		return NULL;
496 
497 	idr_preload(GFP_KERNEL);
498 	spin_lock(&cl->cl_lock);
499 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
500 	spin_unlock(&cl->cl_lock);
501 	idr_preload_end();
502 	if (new_id < 0)
503 		goto out_free;
504 	stid->sc_client = cl;
505 	stid->sc_stateid.si_opaque.so_id = new_id;
506 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
507 	/* Will be incremented before return to client: */
508 	atomic_set(&stid->sc_count, 1);
509 
510 	/*
511 	 * It shouldn't be a problem to reuse an opaque stateid value.
512 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
513 	 * example, a stray write retransmission could be accepted by
514 	 * the server when it should have been rejected.  Therefore,
515 	 * adopt a trick from the sctp code to attempt to maximize the
516 	 * amount of time until an id is reused, by ensuring they always
517 	 * "increase" (mod INT_MAX):
518 	 */
519 	return stid;
520 out_free:
521 	kmem_cache_free(slab, stid);
522 	return NULL;
523 }
524 
525 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
526 {
527 	struct nfs4_stid *stid;
528 	struct nfs4_ol_stateid *stp;
529 
530 	stid = nfs4_alloc_stid(clp, stateid_slab);
531 	if (!stid)
532 		return NULL;
533 
534 	stp = openlockstateid(stid);
535 	stp->st_stid.sc_free = nfs4_free_ol_stateid;
536 	return stp;
537 }
538 
539 static void nfs4_free_deleg(struct nfs4_stid *stid)
540 {
541 	kmem_cache_free(deleg_slab, stid);
542 	atomic_long_dec(&num_delegations);
543 }
544 
545 /*
546  * When we recall a delegation, we should be careful not to hand it
547  * out again straight away.
548  * To ensure this we keep a pair of bloom filters ('new' and 'old')
549  * in which the filehandles of recalled delegations are "stored".
550  * If a filehandle appear in either filter, a delegation is blocked.
551  * If a filehandle appears in either filter, a delegation is blocked.
552  * filter.
553  * Every 30 seconds we swap the filters and clear the "new" one,
554  * unless both are empty of course.
555  *
556  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
557  * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
558  *
559  * 'blocked_delegations_lock', which is always taken in block_delegations(),
560  * is used to manage concurrent access.  Testing does not need the lock
561  * except when swapping the two filters.
562  */
563 static DEFINE_SPINLOCK(blocked_delegations_lock);
564 static struct bloom_pair {
565 	int	entries, old_entries;
566 	time_t	swap_time;
567 	int	new; /* index into 'set' */
568 	DECLARE_BITMAP(set[2], 256);
569 } blocked_delegations;
570 
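/*
 * Return non-zero if a delegation for this filehandle was recalled
 * recently enough that handing out a new one should be avoided.  Also
 * swap and clear the bloom filters when the last swap was more than
 * 30 seconds ago.
 */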
571 static int delegation_blocked(struct knfsd_fh *fh)
572 {
573 	u32 hash;
574 	struct bloom_pair *bd = &blocked_delegations;
575 
576 	if (bd->entries == 0)
577 		return 0;
578 	if (seconds_since_boot() - bd->swap_time > 30) {
579 		spin_lock(&blocked_delegations_lock);
580 		if (seconds_since_boot() - bd->swap_time > 30) {
581 			bd->entries -= bd->old_entries;
582 			bd->old_entries = bd->entries;
583 			memset(bd->set[bd->new], 0,
584 			       sizeof(bd->set[0]));
585 			bd->new = 1-bd->new;
586 			bd->swap_time = seconds_since_boot();
587 		}
588 		spin_unlock(&blocked_delegations_lock);
589 	}
590 	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
591 	if (test_bit(hash&255, bd->set[0]) &&
592 	    test_bit((hash>>8)&255, bd->set[0]) &&
593 	    test_bit((hash>>16)&255, bd->set[0]))
594 		return 1;
595 
596 	if (test_bit(hash&255, bd->set[1]) &&
597 	    test_bit((hash>>8)&255, bd->set[1]) &&
598 	    test_bit((hash>>16)&255, bd->set[1]))
599 		return 1;
600 
601 	return 0;
602 }
603 
604 static void block_delegations(struct knfsd_fh *fh)
605 {
606 	u32 hash;
607 	struct bloom_pair *bd = &blocked_delegations;
608 
609 	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
610 
611 	spin_lock(&blocked_delegations_lock);
612 	__set_bit(hash&255, bd->set[bd->new]);
613 	__set_bit((hash>>8)&255, bd->set[bd->new]);
614 	__set_bit((hash>>16)&255, bd->set[bd->new]);
615 	if (bd->entries == 0)
616 		bd->swap_time = seconds_since_boot();
617 	bd->entries += 1;
618 	spin_unlock(&blocked_delegations_lock);
619 }
620 
621 static struct nfs4_delegation *
622 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
623 {
624 	struct nfs4_delegation *dp;
625 	long n;
626 
627 	dprintk("NFSD alloc_init_deleg\n");
628 	n = atomic_long_inc_return(&num_delegations);
629 	if (n < 0 || n > max_delegations)
630 		goto out_dec;
631 	if (delegation_blocked(&current_fh->fh_handle))
632 		goto out_dec;
633 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
634 	if (dp == NULL)
635 		goto out_dec;
636 
637 	dp->dl_stid.sc_free = nfs4_free_deleg;
638 	/*
639 	 * delegation seqid's are never incremented.  The 4.1 special
640 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
641 	 * 0 anyway just for consistency and use 1:
642 	 */
643 	dp->dl_stid.sc_stateid.si_generation = 1;
644 	INIT_LIST_HEAD(&dp->dl_perfile);
645 	INIT_LIST_HEAD(&dp->dl_perclnt);
646 	INIT_LIST_HEAD(&dp->dl_recall_lru);
647 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
648 	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
649 	return dp;
650 out_dec:
651 	atomic_long_dec(&num_delegations);
652 	return NULL;
653 }
654 
655 void
656 nfs4_put_stid(struct nfs4_stid *s)
657 {
658 	struct nfs4_file *fp = s->sc_file;
659 	struct nfs4_client *clp = s->sc_client;
660 
661 	might_lock(&clp->cl_lock);
662 
663 	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
664 		wake_up_all(&close_wq);
665 		return;
666 	}
667 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
668 	spin_unlock(&clp->cl_lock);
669 	s->sc_free(s);
670 	if (fp)
671 		put_nfs4_file(fp);
672 }
673 
674 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
675 {
676 	lockdep_assert_held(&state_lock);
677 
678 	if (!fp->fi_lease)
679 		return;
680 	if (atomic_dec_and_test(&fp->fi_delegees)) {
681 		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
682 		fp->fi_lease = NULL;
683 		fput(fp->fi_deleg_file);
684 		fp->fi_deleg_file = NULL;
685 	}
686 }
687 
688 static void unhash_stid(struct nfs4_stid *s)
689 {
690 	s->sc_type = 0;
691 }
692 
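/*
 * Hash a delegation onto the per-file and per-client lists, taking a
 * reference for the lists.  Caller must hold state_lock and the file's
 * fi_lock.
 */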
693 static void
694 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
695 {
696 	lockdep_assert_held(&state_lock);
697 	lockdep_assert_held(&fp->fi_lock);
698 
699 	atomic_inc(&dp->dl_stid.sc_count);
700 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
701 	list_add(&dp->dl_perfile, &fp->fi_delegations);
702 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
703 }
704 
705 static void
706 unhash_delegation_locked(struct nfs4_delegation *dp)
707 {
708 	struct nfs4_file *fp = dp->dl_stid.sc_file;
709 
710 	lockdep_assert_held(&state_lock);
711 
712 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
713 	/* Ensure that deleg break won't try to requeue it */
714 	++dp->dl_time;
715 	spin_lock(&fp->fi_lock);
716 	list_del_init(&dp->dl_perclnt);
717 	list_del_init(&dp->dl_recall_lru);
718 	list_del_init(&dp->dl_perfile);
719 	spin_unlock(&fp->fi_lock);
720 	if (fp)
721 		nfs4_put_deleg_lease(fp);
722 }
723 
724 static void destroy_delegation(struct nfs4_delegation *dp)
725 {
726 	spin_lock(&state_lock);
727 	unhash_delegation_locked(dp);
728 	spin_unlock(&state_lock);
729 	nfs4_put_stid(&dp->dl_stid);
730 }
731 
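/*
 * Dispose of a delegation that the server is revoking.  For a v4.0 client
 * we just drop the reference; for v4.1+ the stateid is parked on
 * cl_revoked so the client can later discover the revocation (via
 * TEST_STATEID and FREE_STATEID).
 */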
732 static void revoke_delegation(struct nfs4_delegation *dp)
733 {
734 	struct nfs4_client *clp = dp->dl_stid.sc_client;
735 
736 	WARN_ON(!list_empty(&dp->dl_recall_lru));
737 
738 	if (clp->cl_minorversion == 0)
739 		nfs4_put_stid(&dp->dl_stid);
740 	else {
741 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
742 		spin_lock(&clp->cl_lock);
743 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
744 		spin_unlock(&clp->cl_lock);
745 	}
746 }
747 
748 /*
749  * SETCLIENTID state
750  */
751 
752 static unsigned int clientid_hashval(u32 id)
753 {
754 	return id & CLIENT_HASH_MASK;
755 }
756 
757 static unsigned int clientstr_hashval(const char *name)
758 {
759 	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
760 }
761 
762 /*
763  * We store the NONE, READ, WRITE, and BOTH bits separately in the
764  * st_{access,deny}_bmap field of the stateid, in order to track not
765  * only what share bits are currently in force, but also what
766  * combinations of share bits previous opens have used.  This allows us
767  * to enforce the recommendation of rfc 3530 14.2.19 that the server
768  *  return an error if the client attempts to downgrade to a combination
769  * of share bits not explicable by closing some of its previous opens.
770  *
771  * XXX: This enforcement is actually incomplete, since we don't keep
772  * track of access/deny bit combinations; so, e.g., we allow:
773  *
774  *	OPEN allow read, deny write
775  *	OPEN allow both, deny none
776  *	DOWNGRADE allow read, deny none
777  *
778  * which we should reject.
779  */
780 static unsigned int
781 bmap_to_share_mode(unsigned long bmap) {
782 	int i;
783 	unsigned int access = 0;
784 
785 	for (i = 1; i < 4; i++) {
786 		if (test_bit(i, &bmap))
787 			access |= i;
788 	}
789 	return access;
790 }
791 
792 /* set share access for a given stateid */
793 static inline void
794 set_access(u32 access, struct nfs4_ol_stateid *stp)
795 {
796 	unsigned char mask = 1 << access;
797 
798 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
799 	stp->st_access_bmap |= mask;
800 }
801 
802 /* clear share access for a given stateid */
803 static inline void
804 clear_access(u32 access, struct nfs4_ol_stateid *stp)
805 {
806 	unsigned char mask = 1 << access;
807 
808 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
809 	stp->st_access_bmap &= ~mask;
810 }
811 
812 /* test whether a given stateid has access */
813 static inline bool
814 test_access(u32 access, struct nfs4_ol_stateid *stp)
815 {
816 	unsigned char mask = 1 << access;
817 
818 	return (bool)(stp->st_access_bmap & mask);
819 }
820 
821 /* set share deny for a given stateid */
822 static inline void
823 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
824 {
825 	unsigned char mask = 1 << deny;
826 
827 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
828 	stp->st_deny_bmap |= mask;
829 }
830 
831 /* clear share deny for a given stateid */
832 static inline void
833 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
834 {
835 	unsigned char mask = 1 << deny;
836 
837 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
838 	stp->st_deny_bmap &= ~mask;
839 }
840 
841 /* test whether a given stateid is denying specific access */
842 static inline bool
843 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
844 {
845 	unsigned char mask = 1 << deny;
846 
847 	return (bool)(stp->st_deny_bmap & mask);
848 }
849 
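/* Map NFSv4 share access bits onto the corresponding open(2) mode. */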
850 static int nfs4_access_to_omode(u32 access)
851 {
852 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
853 	case NFS4_SHARE_ACCESS_READ:
854 		return O_RDONLY;
855 	case NFS4_SHARE_ACCESS_WRITE:
856 		return O_WRONLY;
857 	case NFS4_SHARE_ACCESS_BOTH:
858 		return O_RDWR;
859 	}
860 	WARN_ON_ONCE(1);
861 	return O_RDONLY;
862 }
863 
864 /*
865  * A stateid that had a deny mode associated with it is being released
866  * or downgraded. Recalculate the deny mode on the file.
867  */
868 static void
869 recalculate_deny_mode(struct nfs4_file *fp)
870 {
871 	struct nfs4_ol_stateid *stp;
872 
873 	spin_lock(&fp->fi_lock);
874 	fp->fi_share_deny = 0;
875 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
876 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
877 	spin_unlock(&fp->fi_lock);
878 }
879 
880 static void
881 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
882 {
883 	int i;
884 	bool change = false;
885 
886 	for (i = 1; i < 4; i++) {
887 		if ((i & deny) != i) {
888 			change = true;
889 			clear_deny(i, stp);
890 		}
891 	}
892 
893 	/* Recalculate per-file deny mode if there was a change */
894 	if (change)
895 		recalculate_deny_mode(stp->st_stid.sc_file);
896 }
897 
898 /* release all access and file references for a given stateid */
899 static void
900 release_all_access(struct nfs4_ol_stateid *stp)
901 {
902 	int i;
903 	struct nfs4_file *fp = stp->st_stid.sc_file;
904 
905 	if (fp && stp->st_deny_bmap != 0)
906 		recalculate_deny_mode(fp);
907 
908 	for (i = 1; i < 4; i++) {
909 		if (test_access(i, stp))
910 			nfs4_file_put_access(stp->st_stid.sc_file, i);
911 		clear_access(i, stp);
912 	}
913 }
914 
915 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
916 {
917 	struct nfs4_client *clp = sop->so_client;
918 
919 	might_lock(&clp->cl_lock);
920 
921 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
922 		return;
923 	sop->so_ops->so_unhash(sop);
924 	spin_unlock(&clp->cl_lock);
925 	kfree(sop->so_owner.data);
926 	sop->so_ops->so_free(sop);
927 }
928 
929 static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
930 {
931 	struct nfs4_file *fp = stp->st_stid.sc_file;
932 
933 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
934 
935 	spin_lock(&fp->fi_lock);
936 	list_del(&stp->st_perfile);
937 	spin_unlock(&fp->fi_lock);
938 	list_del(&stp->st_perstateowner);
939 }
940 
941 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
942 {
943 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
944 
945 	release_all_access(stp);
946 	if (stp->st_stateowner)
947 		nfs4_put_stateowner(stp->st_stateowner);
948 	kmem_cache_free(stateid_slab, stid);
949 }
950 
951 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
952 {
953 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
954 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
955 	struct file *file;
956 
957 	file = find_any_file(stp->st_stid.sc_file);
958 	if (file)
959 		filp_close(file, (fl_owner_t)lo);
960 	nfs4_free_ol_stateid(stid);
961 }
962 
963 /*
964  * Put the persistent reference to an already unhashed generic stateid, while
965  * holding the cl_lock. If it's the last reference, then put it onto the
966  * reaplist for later destruction.
967  */
968 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
969 				       struct list_head *reaplist)
970 {
971 	struct nfs4_stid *s = &stp->st_stid;
972 	struct nfs4_client *clp = s->sc_client;
973 
974 	lockdep_assert_held(&clp->cl_lock);
975 
976 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
977 
978 	if (!atomic_dec_and_test(&s->sc_count)) {
979 		wake_up_all(&close_wq);
980 		return;
981 	}
982 
983 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
984 	list_add(&stp->st_locks, reaplist);
985 }
986 
987 static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
988 {
989 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
990 
991 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
992 
993 	list_del_init(&stp->st_locks);
994 	unhash_ol_stateid(stp);
995 	unhash_stid(&stp->st_stid);
996 }
997 
998 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
999 {
1000 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
1001 
1002 	spin_lock(&oo->oo_owner.so_client->cl_lock);
1003 	unhash_lock_stateid(stp);
1004 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
1005 	nfs4_put_stid(&stp->st_stid);
1006 }
1007 
1008 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1009 {
1010 	struct nfs4_client *clp = lo->lo_owner.so_client;
1011 
1012 	lockdep_assert_held(&clp->cl_lock);
1013 
1014 	list_del_init(&lo->lo_owner.so_strhash);
1015 }
1016 
1017 /*
1018  * Free a list of generic stateids that were collected earlier after being
1019  * fully unhashed.
1020  */
1021 static void
1022 free_ol_stateid_reaplist(struct list_head *reaplist)
1023 {
1024 	struct nfs4_ol_stateid *stp;
1025 	struct nfs4_file *fp;
1026 
1027 	might_sleep();
1028 
1029 	while (!list_empty(reaplist)) {
1030 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1031 				       st_locks);
1032 		list_del(&stp->st_locks);
1033 		fp = stp->st_stid.sc_file;
1034 		stp->st_stid.sc_free(&stp->st_stid);
1035 		if (fp)
1036 			put_nfs4_file(fp);
1037 	}
1038 }
1039 
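/*
 * Unhash a lockowner and all of its lock stateids, then drop the owner's
 * reference.  The stateids are collected on a reaplist and freed after
 * cl_lock has been dropped.
 */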
1040 static void release_lockowner(struct nfs4_lockowner *lo)
1041 {
1042 	struct nfs4_client *clp = lo->lo_owner.so_client;
1043 	struct nfs4_ol_stateid *stp;
1044 	struct list_head reaplist;
1045 
1046 	INIT_LIST_HEAD(&reaplist);
1047 
1048 	spin_lock(&clp->cl_lock);
1049 	unhash_lockowner_locked(lo);
1050 	while (!list_empty(&lo->lo_owner.so_stateids)) {
1051 		stp = list_first_entry(&lo->lo_owner.so_stateids,
1052 				struct nfs4_ol_stateid, st_perstateowner);
1053 		unhash_lock_stateid(stp);
1054 		put_ol_stateid_locked(stp, &reaplist);
1055 	}
1056 	spin_unlock(&clp->cl_lock);
1057 	free_ol_stateid_reaplist(&reaplist);
1058 	nfs4_put_stateowner(&lo->lo_owner);
1059 }
1060 
1061 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1062 				       struct list_head *reaplist)
1063 {
1064 	struct nfs4_ol_stateid *stp;
1065 
1066 	while (!list_empty(&open_stp->st_locks)) {
1067 		stp = list_entry(open_stp->st_locks.next,
1068 				struct nfs4_ol_stateid, st_locks);
1069 		unhash_lock_stateid(stp);
1070 		put_ol_stateid_locked(stp, reaplist);
1071 	}
1072 }
1073 
1074 static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
1075 				struct list_head *reaplist)
1076 {
1077 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1078 
1079 	unhash_ol_stateid(stp);
1080 	release_open_stateid_locks(stp, reaplist);
1081 }
1082 
1083 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1084 {
1085 	LIST_HEAD(reaplist);
1086 
1087 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1088 	unhash_open_stateid(stp, &reaplist);
1089 	put_ol_stateid_locked(stp, &reaplist);
1090 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1091 	free_ol_stateid_reaplist(&reaplist);
1092 }
1093 
1094 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1095 {
1096 	struct nfs4_client *clp = oo->oo_owner.so_client;
1097 
1098 	lockdep_assert_held(&clp->cl_lock);
1099 
1100 	list_del_init(&oo->oo_owner.so_strhash);
1101 	list_del_init(&oo->oo_perclient);
1102 }
1103 
1104 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1105 {
1106 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1107 					  nfsd_net_id);
1108 	struct nfs4_ol_stateid *s;
1109 
1110 	spin_lock(&nn->client_lock);
1111 	s = oo->oo_last_closed_stid;
1112 	if (s) {
1113 		list_del_init(&oo->oo_close_lru);
1114 		oo->oo_last_closed_stid = NULL;
1115 	}
1116 	spin_unlock(&nn->client_lock);
1117 	if (s)
1118 		nfs4_put_stid(&s->st_stid);
1119 }
1120 
1121 static void release_openowner(struct nfs4_openowner *oo)
1122 {
1123 	struct nfs4_ol_stateid *stp;
1124 	struct nfs4_client *clp = oo->oo_owner.so_client;
1125 	struct list_head reaplist;
1126 
1127 	INIT_LIST_HEAD(&reaplist);
1128 
1129 	spin_lock(&clp->cl_lock);
1130 	unhash_openowner_locked(oo);
1131 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1132 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1133 				struct nfs4_ol_stateid, st_perstateowner);
1134 		unhash_open_stateid(stp, &reaplist);
1135 		put_ol_stateid_locked(stp, &reaplist);
1136 	}
1137 	spin_unlock(&clp->cl_lock);
1138 	free_ol_stateid_reaplist(&reaplist);
1139 	release_last_closed_stateid(oo);
1140 	nfs4_put_stateowner(&oo->oo_owner);
1141 }
1142 
1143 static inline int
1144 hash_sessionid(struct nfs4_sessionid *sessionid)
1145 {
1146 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1147 
1148 	return sid->sequence % SESSION_HASH_SIZE;
1149 }
1150 
1151 #ifdef NFSD_DEBUG
1152 static inline void
1153 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1154 {
1155 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1156 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1157 }
1158 #else
1159 static inline void
1160 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1161 {
1162 }
1163 #endif
1164 
1165 /*
1166  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1167  * won't be used for replay.
1168  */
1169 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1170 {
1171 	struct nfs4_stateowner *so = cstate->replay_owner;
1172 
1173 	if (nfserr == nfserr_replay_me)
1174 		return;
1175 
1176 	if (!seqid_mutating_err(ntohl(nfserr))) {
1177 		nfsd4_cstate_clear_replay(cstate);
1178 		return;
1179 	}
1180 	if (!so)
1181 		return;
1182 	if (so->so_is_open_owner)
1183 		release_last_closed_stateid(openowner(so));
1184 	so->so_seqid++;
1185 	return;
1186 }
1187 
1188 static void
1189 gen_sessionid(struct nfsd4_session *ses)
1190 {
1191 	struct nfs4_client *clp = ses->se_client;
1192 	struct nfsd4_sessionid *sid;
1193 
1194 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1195 	sid->clientid = clp->cl_clientid;
1196 	sid->sequence = current_sessionid++;
1197 	sid->reserved = 0;
1198 }
1199 
1200 /*
1201  * The protocol defines ca_maxresponsesize_cached to include the size of
1202  * the rpc header, but all we need to cache is the data starting after
1203  * the end of the initial SEQUENCE operation--the rest we regenerate
1204  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1205  * value that is the number of bytes in our cache plus a few additional
1206  * bytes.  In order to stay on the safe side, and not promise more than
1207  * we can cache, those additional bytes must be the minimum possible: 24
1208  * bytes of rpc header (xid through accept state, with AUTH_NULL
1209  * verifier), 12 for the compound header (with zero-length tag), and 44
1210  * for the SEQUENCE op response:
1211  */
1212 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1213 
1214 static void
1215 free_session_slots(struct nfsd4_session *ses)
1216 {
1217 	int i;
1218 
1219 	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
1220 		kfree(ses->se_slots[i]);
1221 }
1222 
1223 /*
1224  * We don't actually need to cache the rpc and session headers, so we
1225  * can allocate a little less for each slot:
1226  */
1227 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1228 {
1229 	u32 size;
1230 
1231 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1232 		size = 0;
1233 	else
1234 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1235 	return size + sizeof(struct nfsd4_slot);
1236 }
1237 
1238 /*
1239  * XXX: If we run out of reserved DRC memory we could (up to a point)
1240  * re-negotiate active sessions and reduce their slot usage to make
1241  * room for new connections. For now we just fail the create session.
1242  */
1243 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1244 {
1245 	u32 slotsize = slot_bytes(ca);
1246 	u32 num = ca->maxreqs;
1247 	int avail;
1248 
1249 	spin_lock(&nfsd_drc_lock);
1250 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1251 		    nfsd_drc_max_mem - nfsd_drc_mem_used);
1252 	num = min_t(int, num, avail / slotsize);
1253 	nfsd_drc_mem_used += num * slotsize;
1254 	spin_unlock(&nfsd_drc_lock);
1255 
1256 	return num;
1257 }
1258 
1259 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1260 {
1261 	int slotsize = slot_bytes(ca);
1262 
1263 	spin_lock(&nfsd_drc_lock);
1264 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1265 	spin_unlock(&nfsd_drc_lock);
1266 }
1267 
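/*
 * Allocate a session along with its slot table.  The session structure
 * and the array of slot pointers share a single allocation; each slot and
 * its reply cache are then allocated individually.
 */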
1268 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1269 					   struct nfsd4_channel_attrs *battrs)
1270 {
1271 	int numslots = fattrs->maxreqs;
1272 	int slotsize = slot_bytes(fattrs);
1273 	struct nfsd4_session *new;
1274 	int mem, i;
1275 
1276 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1277 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1278 	mem = numslots * sizeof(struct nfsd4_slot *);
1279 
1280 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1281 	if (!new)
1282 		return NULL;
1283 	/* allocate each struct nfsd4_slot and data cache in one piece */
1284 	for (i = 0; i < numslots; i++) {
1285 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1286 		if (!new->se_slots[i])
1287 			goto out_free;
1288 	}
1289 
1290 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1291 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1292 
1293 	return new;
1294 out_free:
1295 	while (i--)
1296 		kfree(new->se_slots[i]);
1297 	kfree(new);
1298 	return NULL;
1299 }
1300 
1301 static void free_conn(struct nfsd4_conn *c)
1302 {
1303 	svc_xprt_put(c->cn_xprt);
1304 	kfree(c);
1305 }
1306 
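/*
 * Transport-gone callback: remove the connection from its session and
 * re-probe the client's callback channel.
 */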
1307 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1308 {
1309 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1310 	struct nfs4_client *clp = c->cn_session->se_client;
1311 
1312 	spin_lock(&clp->cl_lock);
1313 	if (!list_empty(&c->cn_persession)) {
1314 		list_del(&c->cn_persession);
1315 		free_conn(c);
1316 	}
1317 	nfsd4_probe_callback(clp);
1318 	spin_unlock(&clp->cl_lock);
1319 }
1320 
1321 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1322 {
1323 	struct nfsd4_conn *conn;
1324 
1325 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1326 	if (!conn)
1327 		return NULL;
1328 	svc_xprt_get(rqstp->rq_xprt);
1329 	conn->cn_xprt = rqstp->rq_xprt;
1330 	conn->cn_flags = flags;
1331 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1332 	return conn;
1333 }
1334 
1335 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1336 {
1337 	conn->cn_session = ses;
1338 	list_add(&conn->cn_persession, &ses->se_conns);
1339 }
1340 
1341 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1342 {
1343 	struct nfs4_client *clp = ses->se_client;
1344 
1345 	spin_lock(&clp->cl_lock);
1346 	__nfsd4_hash_conn(conn, ses);
1347 	spin_unlock(&clp->cl_lock);
1348 }
1349 
1350 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1351 {
1352 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1353 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1354 }
1355 
1356 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1357 {
1358 	int ret;
1359 
1360 	nfsd4_hash_conn(conn, ses);
1361 	ret = nfsd4_register_conn(conn);
1362 	if (ret)
1363 		/* oops; xprt is already down: */
1364 		nfsd4_conn_lost(&conn->cn_xpt_user);
1365 	/* We may have gained or lost a callback channel: */
1366 	nfsd4_probe_callback_sync(ses->se_client);
1367 }
1368 
1369 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1370 {
1371 	u32 dir = NFS4_CDFC4_FORE;
1372 
1373 	if (cses->flags & SESSION4_BACK_CHAN)
1374 		dir |= NFS4_CDFC4_BACK;
1375 	return alloc_conn(rqstp, dir);
1376 }
1377 
1378 /* must be called under client_lock */
1379 static void nfsd4_del_conns(struct nfsd4_session *s)
1380 {
1381 	struct nfs4_client *clp = s->se_client;
1382 	struct nfsd4_conn *c;
1383 
1384 	spin_lock(&clp->cl_lock);
1385 	while (!list_empty(&s->se_conns)) {
1386 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1387 		list_del_init(&c->cn_persession);
1388 		spin_unlock(&clp->cl_lock);
1389 
1390 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1391 		free_conn(c);
1392 
1393 		spin_lock(&clp->cl_lock);
1394 	}
1395 	spin_unlock(&clp->cl_lock);
1396 }
1397 
1398 static void __free_session(struct nfsd4_session *ses)
1399 {
1400 	free_session_slots(ses);
1401 	kfree(ses);
1402 }
1403 
1404 static void free_session(struct nfsd4_session *ses)
1405 {
1406 	nfsd4_del_conns(ses);
1407 	nfsd4_put_drc_mem(&ses->se_fchannel);
1408 	__free_session(ses);
1409 }
1410 
1411 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1412 {
1413 	int idx;
1414 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1415 
1416 	new->se_client = clp;
1417 	gen_sessionid(new);
1418 
1419 	INIT_LIST_HEAD(&new->se_conns);
1420 
1421 	new->se_cb_seq_nr = 1;
1422 	new->se_flags = cses->flags;
1423 	new->se_cb_prog = cses->callback_prog;
1424 	new->se_cb_sec = cses->cb_sec;
1425 	atomic_set(&new->se_ref, 0);
1426 	idx = hash_sessionid(&new->se_sessionid);
1427 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1428 	spin_lock(&clp->cl_lock);
1429 	list_add(&new->se_perclnt, &clp->cl_sessions);
1430 	spin_unlock(&clp->cl_lock);
1431 
1432 	if (cses->flags & SESSION4_BACK_CHAN) {
1433 		struct sockaddr *sa = svc_addr(rqstp);
1434 		/*
1435 		 * This is a little silly; with sessions there's no real
1436 		 * use for the callback address.  Use the peer address
1437 		 * as a reasonable default for now, but consider fixing
1438 		 * the rpc client not to require an address in the
1439 		 * future:
1440 		 */
1441 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1442 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1443 	}
1444 }
1445 
1446 /* caller must hold client_lock */
1447 static struct nfsd4_session *
1448 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1449 {
1450 	struct nfsd4_session *elem;
1451 	int idx;
1452 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1453 
1454 	lockdep_assert_held(&nn->client_lock);
1455 
1456 	dump_sessionid(__func__, sessionid);
1457 	idx = hash_sessionid(sessionid);
1458 	/* Search in the appropriate list */
1459 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1460 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1461 			    NFS4_MAX_SESSIONID_LEN)) {
1462 			return elem;
1463 		}
1464 	}
1465 
1466 	dprintk("%s: session not found\n", __func__);
1467 	return NULL;
1468 }
1469 
1470 static struct nfsd4_session *
1471 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1472 		__be32 *ret)
1473 {
1474 	struct nfsd4_session *session;
1475 	__be32 status = nfserr_badsession;
1476 
1477 	session = __find_in_sessionid_hashtbl(sessionid, net);
1478 	if (!session)
1479 		goto out;
1480 	status = nfsd4_get_session_locked(session);
1481 	if (status)
1482 		session = NULL;
1483 out:
1484 	*ret = status;
1485 	return session;
1486 }
1487 
1488 /* caller must hold client_lock */
1489 static void
1490 unhash_session(struct nfsd4_session *ses)
1491 {
1492 	struct nfs4_client *clp = ses->se_client;
1493 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1494 
1495 	lockdep_assert_held(&nn->client_lock);
1496 
1497 	list_del(&ses->se_hash);
1498 	spin_lock(&ses->se_client->cl_lock);
1499 	list_del(&ses->se_perclnt);
1500 	spin_unlock(&ses->se_client->cl_lock);
1501 }
1502 
1503 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1504 static int
1505 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1506 {
1507 	if (clid->cl_boot == nn->boot_time)
1508 		return 0;
1509 	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1510 		clid->cl_boot, clid->cl_id, nn->boot_time);
1511 	return 1;
1512 }
1513 
1514 /*
1515  * XXX Should we use a slab cache?
1516  * This type of memory management is somewhat inefficient, but we use it
1517  * anyway since SETCLIENTID is not a common operation.
1518  */
1519 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1520 {
1521 	struct nfs4_client *clp;
1522 	int i;
1523 
1524 	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1525 	if (clp == NULL)
1526 		return NULL;
1527 	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1528 	if (clp->cl_name.data == NULL)
1529 		goto err_no_name;
1530 	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1531 			OWNER_HASH_SIZE, GFP_KERNEL);
1532 	if (!clp->cl_ownerstr_hashtbl)
1533 		goto err_no_hashtbl;
1534 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1535 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1536 	clp->cl_name.len = name.len;
1537 	INIT_LIST_HEAD(&clp->cl_sessions);
1538 	idr_init(&clp->cl_stateids);
1539 	atomic_set(&clp->cl_refcount, 0);
1540 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1541 	INIT_LIST_HEAD(&clp->cl_idhash);
1542 	INIT_LIST_HEAD(&clp->cl_openowners);
1543 	INIT_LIST_HEAD(&clp->cl_delegations);
1544 	INIT_LIST_HEAD(&clp->cl_lru);
1545 	INIT_LIST_HEAD(&clp->cl_callbacks);
1546 	INIT_LIST_HEAD(&clp->cl_revoked);
1547 	spin_lock_init(&clp->cl_lock);
1548 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1549 	return clp;
1550 err_no_hashtbl:
1551 	kfree(clp->cl_name.data);
1552 err_no_name:
1553 	kfree(clp);
1554 	return NULL;
1555 }
1556 
1557 static void
1558 free_client(struct nfs4_client *clp)
1559 {
1560 	while (!list_empty(&clp->cl_sessions)) {
1561 		struct nfsd4_session *ses;
1562 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1563 				se_perclnt);
1564 		list_del(&ses->se_perclnt);
1565 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
1566 		free_session(ses);
1567 	}
1568 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1569 	free_svc_cred(&clp->cl_cred);
1570 	kfree(clp->cl_ownerstr_hashtbl);
1571 	kfree(clp->cl_name.data);
1572 	idr_destroy(&clp->cl_stateids);
1573 	kfree(clp);
1574 }
1575 
1576 /* must be called under the client_lock */
1577 static void
1578 unhash_client_locked(struct nfs4_client *clp)
1579 {
1580 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1581 	struct nfsd4_session *ses;
1582 
1583 	lockdep_assert_held(&nn->client_lock);
1584 
1585 	/* Mark the client as expired! */
1586 	clp->cl_time = 0;
1587 	/* Make it invisible */
1588 	if (!list_empty(&clp->cl_idhash)) {
1589 		list_del_init(&clp->cl_idhash);
1590 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1591 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1592 		else
1593 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1594 	}
1595 	list_del_init(&clp->cl_lru);
1596 	spin_lock(&clp->cl_lock);
1597 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1598 		list_del_init(&ses->se_hash);
1599 	spin_unlock(&clp->cl_lock);
1600 }
1601 
1602 static void
1603 unhash_client(struct nfs4_client *clp)
1604 {
1605 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1606 
1607 	spin_lock(&nn->client_lock);
1608 	unhash_client_locked(clp);
1609 	spin_unlock(&nn->client_lock);
1610 }
1611 
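/*
 * Expire and unhash the client if nothing else holds a reference to it;
 * otherwise return nfserr_jukebox so the operation can be retried later.
 */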
1612 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1613 {
1614 	if (atomic_read(&clp->cl_refcount))
1615 		return nfserr_jukebox;
1616 	unhash_client_locked(clp);
1617 	return nfs_ok;
1618 }
1619 
1620 static void
1621 __destroy_client(struct nfs4_client *clp)
1622 {
1623 	struct nfs4_openowner *oo;
1624 	struct nfs4_delegation *dp;
1625 	struct list_head reaplist;
1626 
1627 	INIT_LIST_HEAD(&reaplist);
1628 	spin_lock(&state_lock);
1629 	while (!list_empty(&clp->cl_delegations)) {
1630 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1631 		unhash_delegation_locked(dp);
1632 		list_add(&dp->dl_recall_lru, &reaplist);
1633 	}
1634 	spin_unlock(&state_lock);
1635 	while (!list_empty(&reaplist)) {
1636 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1637 		list_del_init(&dp->dl_recall_lru);
1638 		nfs4_put_stid(&dp->dl_stid);
1639 	}
1640 	while (!list_empty(&clp->cl_revoked)) {
1641 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1642 		list_del_init(&dp->dl_recall_lru);
1643 		nfs4_put_stid(&dp->dl_stid);
1644 	}
1645 	while (!list_empty(&clp->cl_openowners)) {
1646 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1647 		atomic_inc(&oo->oo_owner.so_count);
1648 		release_openowner(oo);
1649 	}
1650 	nfsd4_shutdown_callback(clp);
1651 	if (clp->cl_cb_conn.cb_xprt)
1652 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1653 	free_client(clp);
1654 }
1655 
1656 static void
1657 destroy_client(struct nfs4_client *clp)
1658 {
1659 	unhash_client(clp);
1660 	__destroy_client(clp);
1661 }
1662 
1663 static void expire_client(struct nfs4_client *clp)
1664 {
1665 	unhash_client(clp);
1666 	nfsd4_client_record_remove(clp);
1667 	__destroy_client(clp);
1668 }
1669 
1670 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1671 {
1672 	memcpy(target->cl_verifier.data, source->data,
1673 			sizeof(target->cl_verifier.data));
1674 }
1675 
1676 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1677 {
1678 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1679 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1680 }
1681 
1682 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1683 {
1684 	if (source->cr_principal) {
1685 		target->cr_principal =
1686 				kstrdup(source->cr_principal, GFP_KERNEL);
1687 		if (target->cr_principal == NULL)
1688 			return -ENOMEM;
1689 	} else
1690 		target->cr_principal = NULL;
1691 	target->cr_flavor = source->cr_flavor;
1692 	target->cr_uid = source->cr_uid;
1693 	target->cr_gid = source->cr_gid;
1694 	target->cr_group_info = source->cr_group_info;
1695 	get_group_info(target->cr_group_info);
1696 	target->cr_gss_mech = source->cr_gss_mech;
1697 	if (source->cr_gss_mech)
1698 		gss_mech_get(source->cr_gss_mech);
1699 	return 0;
1700 }
1701 
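/*
 * Compare two opaque client names, as used to keep the conf/unconf
 * rbtrees ordered: shorter blobs sort first, equal-length blobs are
 * ordered by memcmp().
 */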
1702 static long long
1703 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1704 {
1705 	long long res;
1706 
1707 	res = o1->len - o2->len;
1708 	if (res)
1709 		return res;
1710 	return (long long)memcmp(o1->data, o2->data, o1->len);
1711 }
1712 
1713 static int same_name(const char *n1, const char *n2)
1714 {
1715 	return 0 == memcmp(n1, n2, HEXDIR_LEN);
1716 }
1717 
1718 static int
1719 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1720 {
1721 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1722 }
1723 
1724 static int
1725 same_clid(clientid_t *cl1, clientid_t *cl2)
1726 {
1727 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1728 }
1729 
1730 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1731 {
1732 	int i;
1733 
1734 	if (g1->ngroups != g2->ngroups)
1735 		return false;
1736 	for (i=0; i<g1->ngroups; i++)
1737 		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1738 			return false;
1739 	return true;
1740 }
1741 
1742 /*
1743  * RFC 3530 language requires clid_inuse be returned when the
1744  * "principal" associated with a request differs from that previously
1745  * used.  We use the uid, gids, and gss principal string as our best
1746  * approximation.  We also don't want to allow non-gss use of a client
1747  * established using gss: in theory cr_principal should catch that
1748  * change, but in practice cr_principal can be null even in the gss case
1749  * since gssd doesn't always pass down a principal string.
1750  */
1751 static bool is_gss_cred(struct svc_cred *cr)
1752 {
1753 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
1754 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1755 }
1756 
1757 
1758 static bool
1759 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1760 {
1761 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1762 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1763 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1764 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1765 		return false;
1766 	if (cr1->cr_principal == cr2->cr_principal)
1767 		return true;
1768 	if (!cr1->cr_principal || !cr2->cr_principal)
1769 		return false;
1770 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1771 }
1772 
1773 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1774 {
1775 	struct svc_cred *cr = &rqstp->rq_cred;
1776 	u32 service;
1777 
1778 	if (!cr->cr_gss_mech)
1779 		return false;
1780 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1781 	return service == RPC_GSS_SVC_INTEGRITY ||
1782 	       service == RPC_GSS_SVC_PRIVACY;
1783 }
1784 
1785 static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1786 {
1787 	struct svc_cred *cr = &rqstp->rq_cred;
1788 
1789 	if (!cl->cl_mach_cred)
1790 		return true;
1791 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
1792 		return false;
1793 	if (!svc_rqst_integrity_protected(rqstp))
1794 		return false;
1795 	if (!cr->cr_principal)
1796 		return false;
1797 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1798 }
1799 
1800 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
1801 {
1802 	__be32 verf[2];
1803 
1804 	/*
1805 	 * This is opaque to client, so no need to byte-swap. Use
1806 	 * __force to keep sparse happy
1807 	 */
1808 	verf[0] = (__force __be32)get_seconds();
1809 	verf[1] = (__force __be32)nn->clientid_counter;
1810 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1811 }
1812 
1813 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1814 {
1815 	clp->cl_clientid.cl_boot = nn->boot_time;
1816 	clp->cl_clientid.cl_id = nn->clientid_counter++;
1817 	gen_confirm(clp, nn);
1818 }
1819 
1820 static struct nfs4_stid *
1821 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1822 {
1823 	struct nfs4_stid *ret;
1824 
1825 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1826 	if (!ret || !ret->sc_type)
1827 		return NULL;
1828 	return ret;
1829 }
1830 
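/*
 * Look up a stateid by its opaque id and take a reference on it, but only
 * if its type matches one of the bits set in typemask.
 */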
1831 static struct nfs4_stid *
1832 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1833 {
1834 	struct nfs4_stid *s;
1835 
1836 	spin_lock(&cl->cl_lock);
1837 	s = find_stateid_locked(cl, t);
1838 	if (s != NULL) {
1839 		if (typemask & s->sc_type)
1840 			atomic_inc(&s->sc_count);
1841 		else
1842 			s = NULL;
1843 	}
1844 	spin_unlock(&cl->cl_lock);
1845 	return s;
1846 }
1847 
1848 static struct nfs4_client *create_client(struct xdr_netobj name,
1849 		struct svc_rqst *rqstp, nfs4_verifier *verf)
1850 {
1851 	struct nfs4_client *clp;
1852 	struct sockaddr *sa = svc_addr(rqstp);
1853 	int ret;
1854 	struct net *net = SVC_NET(rqstp);
1855 
1856 	clp = alloc_client(name);
1857 	if (clp == NULL)
1858 		return NULL;
1859 
1860 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1861 	if (ret) {
1862 		free_client(clp);
1863 		return NULL;
1864 	}
1865 	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
1866 	clp->cl_time = get_seconds();
1867 	clear_bit(0, &clp->cl_cb_slot_busy);
1868 	copy_verf(clp, verf);
1869 	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1870 	clp->cl_cb_session = NULL;
1871 	clp->net = net;
1872 	return clp;
1873 }
1874 
1875 static void
1876 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
1877 {
1878 	struct rb_node **new = &(root->rb_node), *parent = NULL;
1879 	struct nfs4_client *clp;
1880 
1881 	while (*new) {
1882 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
1883 		parent = *new;
1884 
1885 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
1886 			new = &((*new)->rb_left);
1887 		else
1888 			new = &((*new)->rb_right);
1889 	}
1890 
1891 	rb_link_node(&new_clp->cl_namenode, parent, new);
1892 	rb_insert_color(&new_clp->cl_namenode, root);
1893 }
1894 
1895 static struct nfs4_client *
1896 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1897 {
1898 	long long cmp;
1899 	struct rb_node *node = root->rb_node;
1900 	struct nfs4_client *clp;
1901 
1902 	while (node) {
1903 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
1904 		cmp = compare_blob(&clp->cl_name, name);
1905 		if (cmp > 0)
1906 			node = node->rb_left;
1907 		else if (cmp < 0)
1908 			node = node->rb_right;
1909 		else
1910 			return clp;
1911 	}
1912 	return NULL;
1913 }
1914 
1915 static void
1916 add_to_unconfirmed(struct nfs4_client *clp)
1917 {
1918 	unsigned int idhashval;
1919 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1920 
1921 	lockdep_assert_held(&nn->client_lock);
1922 
1923 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1924 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1925 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1926 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1927 	renew_client_locked(clp);
1928 }
1929 
1930 static void
1931 move_to_confirmed(struct nfs4_client *clp)
1932 {
1933 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1934 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1935 
1936 	lockdep_assert_held(&nn->client_lock);
1937 
1938 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1939 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1940 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1941 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
1942 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1943 	renew_client_locked(clp);
1944 }
1945 
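/*
 * Look a client up by short-form clientid.  The "sessions" argument says
 * whether the caller expects a sessions-based (4.1+) client; a client whose
 * minorversion does not match that expectation is treated as not found, so
 * 4.0 and 4.1 clientids are never confused with each other.
 */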
1946 static struct nfs4_client *
1947 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1948 {
1949 	struct nfs4_client *clp;
1950 	unsigned int idhashval = clientid_hashval(clid->cl_id);
1951 
1952 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
1953 		if (same_clid(&clp->cl_clientid, clid)) {
1954 			if ((bool)clp->cl_minorversion != sessions)
1955 				return NULL;
1956 			renew_client_locked(clp);
1957 			return clp;
1958 		}
1959 	}
1960 	return NULL;
1961 }
1962 
1963 static struct nfs4_client *
1964 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1965 {
1966 	struct list_head *tbl = nn->conf_id_hashtbl;
1967 
1968 	lockdep_assert_held(&nn->client_lock);
1969 	return find_client_in_id_table(tbl, clid, sessions);
1970 }
1971 
1972 static struct nfs4_client *
1973 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1974 {
1975 	struct list_head *tbl = nn->unconf_id_hashtbl;
1976 
1977 	lockdep_assert_held(&nn->client_lock);
1978 	return find_client_in_id_table(tbl, clid, sessions);
1979 }
1980 
1981 static bool clp_used_exchangeid(struct nfs4_client *clp)
1982 {
1983 	return clp->cl_exchange_flags != 0;
1984 }
1985 
1986 static struct nfs4_client *
1987 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1988 {
1989 	lockdep_assert_held(&nn->client_lock);
1990 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
1991 }
1992 
1993 static struct nfs4_client *
1994 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1995 {
1996 	lockdep_assert_held(&nn->client_lock);
1997 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1998 }
1999 
2000 static void
2001 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2002 {
2003 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2004 	struct sockaddr	*sa = svc_addr(rqstp);
2005 	u32 scopeid = rpc_get_scope_id(sa);
2006 	unsigned short expected_family;
2007 
2008 	/* Currently, we only support tcp and tcp6 for the callback channel */
2009 	if (se->se_callback_netid_len == 3 &&
2010 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2011 		expected_family = AF_INET;
2012 	else if (se->se_callback_netid_len == 4 &&
2013 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2014 		expected_family = AF_INET6;
2015 	else
2016 		goto out_err;
2017 
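	/*
	 * se_callback_addr_val is an RPC "universal address" string, e.g.
	 * "192.0.2.1.8.1" for 192.0.2.1 port 2049 (port == 8 * 256 + 1);
	 * the address here is only an illustrative example.
	 */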
2018 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2019 					    se->se_callback_addr_len,
2020 					    (struct sockaddr *)&conn->cb_addr,
2021 					    sizeof(conn->cb_addr));
2022 
2023 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2024 		goto out_err;
2025 
2026 	if (conn->cb_addr.ss_family == AF_INET6)
2027 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2028 
2029 	conn->cb_prog = se->se_callback_prog;
2030 	conn->cb_ident = se->se_callback_ident;
2031 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2032 	return;
2033 out_err:
2034 	conn->cb_addr.ss_family = AF_UNSPEC;
2035 	conn->cb_addrlen = 0;
2036 	dprintk("NFSD: this client (clientid %08x/%08x) "
2037 		"will not receive delegations\n",
2038 		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2039 
2040 	return;
2041 }
2042 
2043 /*
2044  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
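 *
 * The encoded reply (everything after the SEQUENCE result) is copied into
 * the slot so that nfsd4_replay_cache_entry() can return it verbatim if the
 * client retransmits the request.  When the reply is not cached, only the
 * status and op count are recorded and sl_datalen stays zero.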
2045  */
2046 static void
2047 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2048 {
2049 	struct xdr_buf *buf = resp->xdr.buf;
2050 	struct nfsd4_slot *slot = resp->cstate.slot;
2051 	unsigned int base;
2052 
2053 	dprintk("--> %s slot %p\n", __func__, slot);
2054 
2055 	slot->sl_opcnt = resp->opcnt;
2056 	slot->sl_status = resp->cstate.status;
2057 
2058 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2059 	if (nfsd4_not_cached(resp)) {
2060 		slot->sl_datalen = 0;
2061 		return;
2062 	}
2063 	base = resp->cstate.data_offset;
2064 	slot->sl_datalen = buf->len - base;
2065 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2066 		WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
2067 	return;
2068 }
2069 
2070 /*
2071  * Encode the replay sequence operation from the slot values.
2072  * If cachethis was FALSE, additionally encode the
2073  * nfserr_retry_uncached_rep error as the next operation; doing so sets
2074  * resp->p and increments resp->opcnt for
2075  * nfs4svc_encode_compoundres.
2076  */
2077 static __be32
2078 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2079 			  struct nfsd4_compoundres *resp)
2080 {
2081 	struct nfsd4_op *op;
2082 	struct nfsd4_slot *slot = resp->cstate.slot;
2083 
2084 	/* Encode the replayed sequence operation */
2085 	op = &args->ops[resp->opcnt - 1];
2086 	nfsd4_encode_operation(resp, op);
2087 
2088 	/* Return nfserr_retry_uncached_rep in next operation. */
2089 	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
2090 		op = &args->ops[resp->opcnt++];
2091 		op->status = nfserr_retry_uncached_rep;
2092 		nfsd4_encode_operation(resp, op);
2093 	}
2094 	return op->status;
2095 }
2096 
2097 /*
2098  * The sequence operation is not cached because we can use the slot and
2099  * session values.
2100  */
2101 static __be32
2102 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2103 			 struct nfsd4_sequence *seq)
2104 {
2105 	struct nfsd4_slot *slot = resp->cstate.slot;
2106 	struct xdr_stream *xdr = &resp->xdr;
2107 	__be32 *p;
2108 	__be32 status;
2109 
2110 	dprintk("--> %s slot %p\n", __func__, slot);
2111 
2112 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2113 	if (status)
2114 		return status;
2115 
2116 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2117 	if (!p) {
2118 		WARN_ON_ONCE(1);
2119 		return nfserr_serverfault;
2120 	}
2121 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2122 	xdr_commit_encode(xdr);
2123 
2124 	resp->opcnt = slot->sl_opcnt;
2125 	return slot->sl_status;
2126 }
2127 
2128 /*
2129  * Set the exchange_id flags returned by the server.
2130  */
2131 static void
2132 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2133 {
2134 	/* pNFS is not supported */
2135 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2136 
2137 	/* Referrals are supported, Migration is not. */
2138 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2139 
2140 	/* set the wire flags to return to client. */
2141 	clid->flags = new->cl_exchange_flags;
2142 }
2143 
2144 static bool client_has_state(struct nfs4_client *clp)
2145 {
2146 	/*
2147 	 * Note the clp->cl_openowners check isn't quite right: there's no
2148 	 * need to count owners without stateids.
2149 	 *
2150 	 * Also note we should probably be using this in the 4.0 case too.
2151 	 */
2152 	return !list_empty(&clp->cl_openowners)
2153 		|| !list_empty(&clp->cl_delegations)
2154 		|| !list_empty(&clp->cl_sessions);
2155 }
2156 
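/*
 * EXCHANGE_ID processing.  The case numbers in the comments below refer to
 * the table in RFC 5661, section 18.35.4.  Roughly: an update request must
 * name an already-confirmed record that was created via EXCHANGE_ID and
 * still matches on credential and verifier (case 6; cases 7-9 are the
 * failure modes), while a non-update request either reuses a matching
 * confirmed record (case 2), detects a client reboot (case 5), rejects a
 * name collision that still holds state (case 3), or replaces/creates an
 * unconfirmed record (cases 4 and 1).
 */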
2157 __be32
2158 nfsd4_exchange_id(struct svc_rqst *rqstp,
2159 		  struct nfsd4_compound_state *cstate,
2160 		  struct nfsd4_exchange_id *exid)
2161 {
2162 	struct nfs4_client *conf, *new;
2163 	struct nfs4_client *unconf = NULL;
2164 	__be32 status;
2165 	char			addr_str[INET6_ADDRSTRLEN];
2166 	nfs4_verifier		verf = exid->verifier;
2167 	struct sockaddr		*sa = svc_addr(rqstp);
2168 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2169 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2170 
2171 	rpc_ntop(sa, addr_str, sizeof(addr_str));
2172 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2173 		"ip_addr=%s flags %x, spa_how %d\n",
2174 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
2175 		addr_str, exid->flags, exid->spa_how);
2176 
2177 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2178 		return nfserr_inval;
2179 
2180 	switch (exid->spa_how) {
2181 	case SP4_MACH_CRED:
2182 		if (!svc_rqst_integrity_protected(rqstp))
2183 			return nfserr_inval;
		/* fall through */
2184 	case SP4_NONE:
2185 		break;
2186 	default:				/* checked by xdr code */
2187 		WARN_ON_ONCE(1);
		/* fall through */
2188 	case SP4_SSV:
2189 		return nfserr_encr_alg_unsupp;
2190 	}
2191 
2192 	new = create_client(exid->clname, rqstp, &verf);
2193 	if (new == NULL)
2194 		return nfserr_jukebox;
2195 
2196 	/* Cases below refer to rfc 5661 section 18.35.4: */
2197 	spin_lock(&nn->client_lock);
2198 	conf = find_confirmed_client_by_name(&exid->clname, nn);
2199 	if (conf) {
2200 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2201 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2202 
2203 		if (update) {
2204 			if (!clp_used_exchangeid(conf)) { /* buggy client */
2205 				status = nfserr_inval;
2206 				goto out;
2207 			}
2208 			if (!mach_creds_match(conf, rqstp)) {
2209 				status = nfserr_wrong_cred;
2210 				goto out;
2211 			}
2212 			if (!creds_match) { /* case 9 */
2213 				status = nfserr_perm;
2214 				goto out;
2215 			}
2216 			if (!verfs_match) { /* case 8 */
2217 				status = nfserr_not_same;
2218 				goto out;
2219 			}
2220 			/* case 6 */
2221 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2222 			goto out_copy;
2223 		}
2224 		if (!creds_match) { /* case 3 */
2225 			if (client_has_state(conf)) {
2226 				status = nfserr_clid_inuse;
2227 				goto out;
2228 			}
2229 			goto out_new;
2230 		}
2231 		if (verfs_match) { /* case 2 */
2232 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2233 			goto out_copy;
2234 		}
2235 		/* case 5, client reboot */
2236 		conf = NULL;
2237 		goto out_new;
2238 	}
2239 
2240 	if (update) { /* case 7 */
2241 		status = nfserr_noent;
2242 		goto out;
2243 	}
2244 
2245 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
2246 	if (unconf) /* case 4, possible retry or client restart */
2247 		unhash_client_locked(unconf);
2248 
2249 	/* case 1 (normal case) */
2250 out_new:
2251 	if (conf) {
2252 		status = mark_client_expired_locked(conf);
2253 		if (status)
2254 			goto out;
2255 	}
2256 	new->cl_minorversion = cstate->minorversion;
2257 	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);
2258 
2259 	gen_clid(new, nn);
2260 	add_to_unconfirmed(new);
2261 	swap(new, conf);
2262 out_copy:
2263 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2264 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
2265 
2266 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2267 	nfsd4_set_ex_flags(conf, exid);
2268 
2269 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2270 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2271 	status = nfs_ok;
2272 
2273 out:
2274 	spin_unlock(&nn->client_lock);
2275 	if (new)
2276 		expire_client(new);
2277 	if (unconf)
2278 		expire_client(unconf);
2279 	return status;
2280 }
2281 
2282 static __be32
2283 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2284 {
2285 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2286 		slot_seqid);
2287 
2288 	/* The slot is in use, and no response has been sent. */
2289 	if (slot_inuse) {
2290 		if (seqid == slot_seqid)
2291 			return nfserr_jukebox;
2292 		else
2293 			return nfserr_seq_misordered;
2294 	}
2295 	/* Note unsigned 32-bit arithmetic handles wraparound: */
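	/* e.g. slot_seqid == 0xffffffff followed by seqid == 0 is in order */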
2296 	if (likely(seqid == slot_seqid + 1))
2297 		return nfs_ok;
2298 	if (seqid == slot_seqid)
2299 		return nfserr_replay_cache;
2300 	return nfserr_seq_misordered;
2301 }
2302 
2303 /*
2304  * Cache the create session result into the create session single DRC
2305  * slot cache by saving the xdr structure. sl_seqid has been set.
2306  * Do this for solo or embedded create session operations.
2307  */
2308 static void
2309 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2310 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
2311 {
2312 	slot->sl_status = nfserr;
2313 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2314 }
2315 
2316 static __be32
2317 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2318 			    struct nfsd4_clid_slot *slot)
2319 {
2320 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2321 	return slot->sl_status;
2322 }
2323 
2324 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
2325 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2326 			1 +	/* MIN tag is length with zero, only length */ \
2327 			3 +	/* version, opcount, opcode */ \
2328 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2329 				/* seqid, slotID, slotID, cache */ \
2330 			4 ) * sizeof(__be32))
2331 
2332 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2333 			2 +	/* verifier: AUTH_NULL, length 0 */\
2334 			1 +	/* status */ \
2335 			1 +	/* MIN tag is length with zero, only length */ \
2336 			3 +	/* opcount, opcode, opstatus*/ \
2337 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2338 				/* seqid, slotID, slotID, slotID, status */ \
2339 			5 ) * sizeof(__be32))
2340 
2341 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2342 {
2343 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2344 
2345 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2346 		return nfserr_toosmall;
2347 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2348 		return nfserr_toosmall;
2349 	ca->headerpadsz = 0;
2350 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2351 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2352 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2353 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2354 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2355 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2356 	/*
2357 	 * Note decreasing the slot size below the client's request may make
2358 	 * it difficult for the client to function correctly, whereas
2359 	 * decreasing the number of slots will (just?) affect
2360 	 * performance.  When short on memory we therefore prefer to
2361 	 * decrease the number of slots instead of their size.  Clients that
2362 	 * request larger slots than they need will get poor results:
2363 	 */
2364 	ca->maxreqs = nfsd4_get_drc_mem(ca);
2365 	if (!ca->maxreqs)
2366 		return nfserr_jukebox;
2367 
2368 	return nfs_ok;
2369 }
2370 
2371 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
2372 				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
2373 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
2374 				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2375 
2376 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2377 {
2378 	ca->headerpadsz = 0;
2379 
2380 	/*
2381 	 * These RPC_MAX_HEADER macros are overkill, especially since we
2382 	 * don't even do gss on the backchannel yet.  But this is still
2383 	 * less than 1k.  Tighten up this estimate in the unlikely event
2384 	 * it turns out to be a problem for some client:
2385 	 */
2386 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2387 		return nfserr_toosmall;
2388 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2389 		return nfserr_toosmall;
2390 	ca->maxresp_cached = 0;
2391 	if (ca->maxops < 2)
2392 		return nfserr_toosmall;
2393 
2394 	return nfs_ok;
2395 }
2396 
2397 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2398 {
2399 	switch (cbs->flavor) {
2400 	case RPC_AUTH_NULL:
2401 	case RPC_AUTH_UNIX:
2402 		return nfs_ok;
2403 	default:
2404 		/*
2405 		 * GSS case: the spec doesn't allow us to return this
2406 		 * error.  But it also doesn't allow us not to support
2407 		 * GSS.
2408 		 * I'd rather this fail hard than return some error the
2409 		 * client might think it can already handle:
2410 		 */
2411 		return nfserr_encr_alg_unsupp;
2412 	}
2413 }
2414 
2415 __be32
2416 nfsd4_create_session(struct svc_rqst *rqstp,
2417 		     struct nfsd4_compound_state *cstate,
2418 		     struct nfsd4_create_session *cr_ses)
2419 {
2420 	struct sockaddr *sa = svc_addr(rqstp);
2421 	struct nfs4_client *conf, *unconf;
2422 	struct nfs4_client *old = NULL;
2423 	struct nfsd4_session *new;
2424 	struct nfsd4_conn *conn;
2425 	struct nfsd4_clid_slot *cs_slot = NULL;
2426 	__be32 status = 0;
2427 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2428 
2429 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2430 		return nfserr_inval;
2431 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2432 	if (status)
2433 		return status;
2434 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2435 	if (status)
2436 		return status;
2437 	status = check_backchannel_attrs(&cr_ses->back_channel);
2438 	if (status)
2439 		goto out_release_drc_mem;
2440 	status = nfserr_jukebox;
2441 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2442 	if (!new)
2443 		goto out_release_drc_mem;
2444 	conn = alloc_conn_from_crses(rqstp, cr_ses);
2445 	if (!conn)
2446 		goto out_free_session;
2447 
2448 	spin_lock(&nn->client_lock);
2449 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2450 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2451 	WARN_ON_ONCE(conf && unconf);
2452 
2453 	if (conf) {
2454 		status = nfserr_wrong_cred;
2455 		if (!mach_creds_match(conf, rqstp))
2456 			goto out_free_conn;
2457 		cs_slot = &conf->cl_cs_slot;
2458 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2459 		if (status == nfserr_replay_cache) {
2460 			status = nfsd4_replay_create_session(cr_ses, cs_slot);
2461 			goto out_free_conn;
2462 		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
2463 			status = nfserr_seq_misordered;
2464 			goto out_free_conn;
2465 		}
2466 	} else if (unconf) {
2467 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2468 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2469 			status = nfserr_clid_inuse;
2470 			goto out_free_conn;
2471 		}
2472 		status = nfserr_wrong_cred;
2473 		if (!mach_creds_match(unconf, rqstp))
2474 			goto out_free_conn;
2475 		cs_slot = &unconf->cl_cs_slot;
2476 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2477 		if (status) {
2478 			/* an unconfirmed replay returns misordered */
2479 			status = nfserr_seq_misordered;
2480 			goto out_free_conn;
2481 		}
2482 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2483 		if (old) {
2484 			status = mark_client_expired_locked(old);
2485 			if (status) {
2486 				old = NULL;
2487 				goto out_free_conn;
2488 			}
2489 		}
2490 		move_to_confirmed(unconf);
2491 		conf = unconf;
2492 	} else {
2493 		status = nfserr_stale_clientid;
2494 		goto out_free_conn;
2495 	}
2496 	status = nfs_ok;
2497 	/*
2498 	 * We do not support RDMA or persistent sessions
2499 	 */
2500 	cr_ses->flags &= ~SESSION4_PERSIST;
2501 	cr_ses->flags &= ~SESSION4_RDMA;
2502 
2503 	init_session(rqstp, new, conf, cr_ses);
2504 	nfsd4_get_session_locked(new);
2505 
2506 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2507 	       NFS4_MAX_SESSIONID_LEN);
2508 	cs_slot->sl_seqid++;
2509 	cr_ses->seqid = cs_slot->sl_seqid;
2510 
2511 	/* cache solo and embedded create sessions under the client_lock */
2512 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
2513 	spin_unlock(&nn->client_lock);
2514 	/* init connection and backchannel */
2515 	nfsd4_init_conn(rqstp, conn, new);
2516 	nfsd4_put_session(new);
2517 	if (old)
2518 		expire_client(old);
2519 	return status;
2520 out_free_conn:
2521 	spin_unlock(&nn->client_lock);
2522 	free_conn(conn);
2523 	if (old)
2524 		expire_client(old);
2525 out_free_session:
2526 	__free_session(new);
2527 out_release_drc_mem:
2528 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
2529 	return status;
2530 }
2531 
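/*
 * BIND_CONN_TO_SESSION direction: we can always use a connection in both
 * directions, so the CDFC4_*_OR_BOTH requests are promoted to CDFC4_BOTH,
 * while plain FORE or BACK requests are honoured as-is.
 */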
2532 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2533 {
2534 	switch (*dir) {
2535 	case NFS4_CDFC4_FORE:
2536 	case NFS4_CDFC4_BACK:
2537 		return nfs_ok;
2538 	case NFS4_CDFC4_FORE_OR_BOTH:
2539 	case NFS4_CDFC4_BACK_OR_BOTH:
2540 		*dir = NFS4_CDFC4_BOTH;
2541 		return nfs_ok;
2542 	}
2543 	return nfserr_inval;
2544 }
2545 
2546 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2547 {
2548 	struct nfsd4_session *session = cstate->session;
2549 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2550 	__be32 status;
2551 
2552 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2553 	if (status)
2554 		return status;
2555 	spin_lock(&nn->client_lock);
2556 	session->se_cb_prog = bc->bc_cb_program;
2557 	session->se_cb_sec = bc->bc_cb_sec;
2558 	spin_unlock(&nn->client_lock);
2559 
2560 	nfsd4_probe_callback(session->se_client);
2561 
2562 	return nfs_ok;
2563 }
2564 
2565 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2566 		     struct nfsd4_compound_state *cstate,
2567 		     struct nfsd4_bind_conn_to_session *bcts)
2568 {
2569 	__be32 status;
2570 	struct nfsd4_conn *conn;
2571 	struct nfsd4_session *session;
2572 	struct net *net = SVC_NET(rqstp);
2573 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2574 
2575 	if (!nfsd4_last_compound_op(rqstp))
2576 		return nfserr_not_only_op;
2577 	spin_lock(&nn->client_lock);
2578 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2579 	spin_unlock(&nn->client_lock);
2580 	if (!session)
2581 		goto out_no_session;
2582 	status = nfserr_wrong_cred;
2583 	if (!mach_creds_match(session->se_client, rqstp))
2584 		goto out;
2585 	status = nfsd4_map_bcts_dir(&bcts->dir);
2586 	if (status)
2587 		goto out;
2588 	conn = alloc_conn(rqstp, bcts->dir);
2589 	status = nfserr_jukebox;
2590 	if (!conn)
2591 		goto out;
2592 	nfsd4_init_conn(rqstp, conn, session);
2593 	status = nfs_ok;
2594 out:
2595 	nfsd4_put_session(session);
2596 out_no_session:
2597 	return status;
2598 }
2599 
2600 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2601 {
2602 	if (!session)
2603 		return false;
2604 	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2605 }
2606 
2607 __be32
2608 nfsd4_destroy_session(struct svc_rqst *r,
2609 		      struct nfsd4_compound_state *cstate,
2610 		      struct nfsd4_destroy_session *sessionid)
2611 {
2612 	struct nfsd4_session *ses;
2613 	__be32 status;
2614 	int ref_held_by_me = 0;
2615 	struct net *net = SVC_NET(r);
2616 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2617 
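	/*
	 * If the compound is using the very session it wants to destroy,
	 * DESTROY_SESSION must be the only operation in it; in that case we
	 * already hold one reference on the session (ref_held_by_me).
	 */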
2618 	status = nfserr_not_only_op;
2619 	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2620 		if (!nfsd4_last_compound_op(r))
2621 			goto out;
2622 		ref_held_by_me++;
2623 	}
2624 	dump_sessionid(__func__, &sessionid->sessionid);
2625 	spin_lock(&nn->client_lock);
2626 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2627 	if (!ses)
2628 		goto out_client_lock;
2629 	status = nfserr_wrong_cred;
2630 	if (!mach_creds_match(ses->se_client, r))
2631 		goto out_put_session;
2632 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2633 	if (status)
2634 		goto out_put_session;
2635 	unhash_session(ses);
2636 	spin_unlock(&nn->client_lock);
2637 
2638 	nfsd4_probe_callback_sync(ses->se_client);
2639 
2640 	spin_lock(&nn->client_lock);
2641 	status = nfs_ok;
2642 out_put_session:
2643 	nfsd4_put_session_locked(ses);
2644 out_client_lock:
2645 	spin_unlock(&nn->client_lock);
2646 out:
2647 	return status;
2648 }
2649 
2650 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2651 {
2652 	struct nfsd4_conn *c;
2653 
2654 	list_for_each_entry(c, &s->se_conns, cn_persession) {
2655 		if (c->cn_xprt == xpt) {
2656 			return c;
2657 		}
2658 	}
2659 	return NULL;
2660 }
2661 
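/*
 * Bind the connection this SEQUENCE arrived on to the session if it isn't
 * bound already.  Clients that negotiated SP4_MACH_CRED are expected to
 * bind connections explicitly via BIND_CONN_TO_SESSION, so for them an
 * unknown connection is an error rather than an implicit bind.
 */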
2662 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2663 {
2664 	struct nfs4_client *clp = ses->se_client;
2665 	struct nfsd4_conn *c;
2666 	__be32 status = nfs_ok;
2667 	int ret;
2668 
2669 	spin_lock(&clp->cl_lock);
2670 	c = __nfsd4_find_conn(new->cn_xprt, ses);
2671 	if (c)
2672 		goto out_free;
2673 	status = nfserr_conn_not_bound_to_session;
2674 	if (clp->cl_mach_cred)
2675 		goto out_free;
2676 	__nfsd4_hash_conn(new, ses);
2677 	spin_unlock(&clp->cl_lock);
2678 	ret = nfsd4_register_conn(new);
2679 	if (ret)
2680 		/* oops; xprt is already down: */
2681 		nfsd4_conn_lost(&new->cn_xpt_user);
2682 	return nfs_ok;
2683 out_free:
2684 	spin_unlock(&clp->cl_lock);
2685 	free_conn(new);
2686 	return status;
2687 }
2688 
2689 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2690 {
2691 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
2692 
2693 	return args->opcnt > session->se_fchannel.maxops;
2694 }
2695 
2696 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2697 				  struct nfsd4_session *session)
2698 {
2699 	struct xdr_buf *xb = &rqstp->rq_arg;
2700 
2701 	return xb->len > session->se_fchannel.maxreq_sz;
2702 }
2703 
2704 __be32
2705 nfsd4_sequence(struct svc_rqst *rqstp,
2706 	       struct nfsd4_compound_state *cstate,
2707 	       struct nfsd4_sequence *seq)
2708 {
2709 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
2710 	struct xdr_stream *xdr = &resp->xdr;
2711 	struct nfsd4_session *session;
2712 	struct nfs4_client *clp;
2713 	struct nfsd4_slot *slot;
2714 	struct nfsd4_conn *conn;
2715 	__be32 status;
2716 	int buflen;
2717 	struct net *net = SVC_NET(rqstp);
2718 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2719 
2720 	if (resp->opcnt != 1)
2721 		return nfserr_sequence_pos;
2722 
2723 	/*
2724 	 * Will be either used or freed by nfsd4_sequence_check_conn
2725 	 * below.
2726 	 */
2727 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2728 	if (!conn)
2729 		return nfserr_jukebox;
2730 
2731 	spin_lock(&nn->client_lock);
2732 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
2733 	if (!session)
2734 		goto out_no_session;
2735 	clp = session->se_client;
2736 
2737 	status = nfserr_too_many_ops;
2738 	if (nfsd4_session_too_many_ops(rqstp, session))
2739 		goto out_put_session;
2740 
2741 	status = nfserr_req_too_big;
2742 	if (nfsd4_request_too_big(rqstp, session))
2743 		goto out_put_session;
2744 
2745 	status = nfserr_badslot;
2746 	if (seq->slotid >= session->se_fchannel.maxreqs)
2747 		goto out_put_session;
2748 
2749 	slot = session->se_slots[seq->slotid];
2750 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
2751 
2752 	/* We do not negotiate the number of slots yet, so set the
2753 	 * maxslots to the session maxreqs, which is used to encode
2754 	 * sr_highest_slotid and sr_target_slotid to maxslots */
2755 	seq->maxslots = session->se_fchannel.maxreqs;
2756 
2757 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2758 					slot->sl_flags & NFSD4_SLOT_INUSE);
2759 	if (status == nfserr_replay_cache) {
2760 		status = nfserr_seq_misordered;
2761 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2762 			goto out_put_session;
2763 		cstate->slot = slot;
2764 		cstate->session = session;
2765 		cstate->clp = clp;
2766 		/* Return the cached reply status and set cstate->status
2767 		 * for nfsd4_proc_compound processing */
2768 		status = nfsd4_replay_cache_entry(resp, seq);
2769 		cstate->status = nfserr_replay_cache;
2770 		goto out;
2771 	}
2772 	if (status)
2773 		goto out_put_session;
2774 
2775 	status = nfsd4_sequence_check_conn(conn, session);
2776 	conn = NULL;
2777 	if (status)
2778 		goto out_put_session;
2779 
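	/*
	 * Cap the reply size: a reply the client asked us to cache must fit
	 * in maxresp_cached, any other reply only has to fit in maxresp_sz;
	 * pick the matching error up front in case the cap can't be applied.
	 */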
2780 	buflen = (seq->cachethis) ?
2781 			session->se_fchannel.maxresp_cached :
2782 			session->se_fchannel.maxresp_sz;
2783 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
2784 				    nfserr_rep_too_big;
2785 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
2786 		goto out_put_session;
2787 	svc_reserve(rqstp, buflen);
2788 
2789 	status = nfs_ok;
2790 	/* Success! bump slot seqid */
2791 	slot->sl_seqid = seq->seqid;
2792 	slot->sl_flags |= NFSD4_SLOT_INUSE;
2793 	if (seq->cachethis)
2794 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2795 	else
2796 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2797 
2798 	cstate->slot = slot;
2799 	cstate->session = session;
2800 	cstate->clp = clp;
2801 
2802 out:
2803 	switch (clp->cl_cb_state) {
2804 	case NFSD4_CB_DOWN:
2805 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2806 		break;
2807 	case NFSD4_CB_FAULT:
2808 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2809 		break;
2810 	default:
2811 		seq->status_flags = 0;
2812 	}
2813 	if (!list_empty(&clp->cl_revoked))
2814 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2815 out_no_session:
2816 	if (conn)
2817 		free_conn(conn);
2818 	spin_unlock(&nn->client_lock);
2819 	return status;
2820 out_put_session:
2821 	nfsd4_put_session_locked(session);
2822 	goto out_no_session;
2823 }
2824 
2825 void
2826 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2827 {
2828 	struct nfsd4_compound_state *cs = &resp->cstate;
2829 
2830 	if (nfsd4_has_session(cs)) {
2831 		if (cs->status != nfserr_replay_cache) {
2832 			nfsd4_store_cache_entry(resp);
2833 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2834 		}
2835 		/* Drop session reference that was taken in nfsd4_sequence() */
2836 		nfsd4_put_session(cs->session);
2837 	} else if (cs->clp)
2838 		put_client_renew(cs->clp);
2839 }
2840 
2841 __be32
2842 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2843 {
2844 	struct nfs4_client *conf, *unconf;
2845 	struct nfs4_client *clp = NULL;
2846 	__be32 status = 0;
2847 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2848 
2849 	spin_lock(&nn->client_lock);
2850 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2851 	conf = find_confirmed_client(&dc->clientid, true, nn);
2852 	WARN_ON_ONCE(conf && unconf);
2853 
2854 	if (conf) {
2855 		if (client_has_state(conf)) {
2856 			status = nfserr_clientid_busy;
2857 			goto out;
2858 		}
2859 		status = mark_client_expired_locked(conf);
2860 		if (status)
2861 			goto out;
2862 		clp = conf;
2863 	} else if (unconf)
2864 		clp = unconf;
2865 	else {
2866 		status = nfserr_stale_clientid;
2867 		goto out;
2868 	}
2869 	if (!mach_creds_match(clp, rqstp)) {
2870 		clp = NULL;
2871 		status = nfserr_wrong_cred;
2872 		goto out;
2873 	}
2874 	unhash_client_locked(clp);
2875 out:
2876 	spin_unlock(&nn->client_lock);
2877 	if (clp)
2878 		expire_client(clp);
2879 	return status;
2880 }
2881 
2882 __be32
2883 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2884 {
2885 	__be32 status = 0;
2886 
2887 	if (rc->rca_one_fs) {
2888 		if (!cstate->current_fh.fh_dentry)
2889 			return nfserr_nofilehandle;
2890 		/*
2891 		 * We don't take advantage of the rca_one_fs case.
2892 		 * That's OK, it's optional, we can safely ignore it.
2893 		 */
2894 		return nfs_ok;
2895 	}
2896 
2897 	status = nfserr_complete_already;
2898 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2899 			     &cstate->session->se_client->cl_flags))
2900 		goto out;
2901 
2902 	status = nfserr_stale_clientid;
2903 	if (is_client_expired(cstate->session->se_client))
2904 		/*
2905 		 * The following error isn't really legal.
2906 		 * But we only get here if the client just explicitly
2907 		 * destroyed the client.  Surely it no longer cares what
2908 		 * error it gets back on an operation for the dead
2909 		 * client.
2910 		 */
2911 		goto out;
2912 
2913 	status = nfs_ok;
2914 	nfsd4_client_record_create(cstate->session->se_client);
2915 out:
2916 	return status;
2917 }
2918 
2919 __be32
2920 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2921 		  struct nfsd4_setclientid *setclid)
2922 {
2923 	struct xdr_netobj 	clname = setclid->se_name;
2924 	nfs4_verifier		clverifier = setclid->se_verf;
2925 	struct nfs4_client	*conf, *new;
2926 	struct nfs4_client	*unconf = NULL;
2927 	__be32 			status;
2928 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2929 
2930 	new = create_client(clname, rqstp, &clverifier);
2931 	if (new == NULL)
2932 		return nfserr_jukebox;
2933 	/* Cases below refer to rfc 3530 section 14.2.33: */
2934 	spin_lock(&nn->client_lock);
2935 	conf = find_confirmed_client_by_name(&clname, nn);
2936 	if (conf) {
2937 		/* case 0: */
2938 		status = nfserr_clid_inuse;
2939 		if (clp_used_exchangeid(conf))
2940 			goto out;
2941 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2942 			char addr_str[INET6_ADDRSTRLEN];
2943 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2944 				 sizeof(addr_str));
2945 			dprintk("NFSD: setclientid: string in use by client "
2946 				"at %s\n", addr_str);
2947 			goto out;
2948 		}
2949 	}
2950 	unconf = find_unconfirmed_client_by_name(&clname, nn);
2951 	if (unconf)
2952 		unhash_client_locked(unconf);
2953 	if (conf && same_verf(&conf->cl_verifier, &clverifier))
2954 		/* case 1: probable callback update */
2955 		copy_clid(new, conf);
2956 	else /* case 4 (new client) or cases 2, 3 (client reboot): */
2957 		gen_clid(new, nn);
2958 	new->cl_minorversion = 0;
2959 	gen_callback(new, setclid, rqstp);
2960 	add_to_unconfirmed(new);
2961 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2962 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2963 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2964 	new = NULL;
2965 	status = nfs_ok;
2966 out:
2967 	spin_unlock(&nn->client_lock);
2968 	if (new)
2969 		free_client(new);
2970 	if (unconf)
2971 		expire_client(unconf);
2972 	return status;
2973 }
2974 
2975 
2976 __be32
2977 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2978 			 struct nfsd4_compound_state *cstate,
2979 			 struct nfsd4_setclientid_confirm *setclientid_confirm)
2980 {
2981 	struct nfs4_client *conf, *unconf;
2982 	struct nfs4_client *old = NULL;
2983 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2984 	clientid_t * clid = &setclientid_confirm->sc_clientid;
2985 	__be32 status;
2986 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2987 
2988 	if (STALE_CLIENTID(clid, nn))
2989 		return nfserr_stale_clientid;
2990 
2991 	spin_lock(&nn->client_lock);
2992 	conf = find_confirmed_client(clid, false, nn);
2993 	unconf = find_unconfirmed_client(clid, false, nn);
2994 	/*
2995 	 * We try hard to give out unique clientid's, so if we get an
2996 	 * attempt to confirm the same clientid with a different cred,
2997 	 * there's a bug somewhere.  Let's charitably assume it's our
2998 	 * bug.
2999 	 */
3000 	status = nfserr_serverfault;
3001 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3002 		goto out;
3003 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3004 		goto out;
3005 	/* cases below refer to rfc 3530 section 14.2.34: */
3006 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3007 		if (conf && !unconf) /* case 2: probable retransmit */
3008 			status = nfs_ok;
3009 		else /* case 4: client hasn't noticed we rebooted yet? */
3010 			status = nfserr_stale_clientid;
3011 		goto out;
3012 	}
3013 	status = nfs_ok;
3014 	if (conf) { /* case 1: callback update */
3015 		old = unconf;
3016 		unhash_client_locked(old);
3017 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3018 	} else { /* case 3: normal case; new or rebooted client */
3019 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3020 		if (old) {
3021 			status = mark_client_expired_locked(old);
3022 			if (status) {
3023 				old = NULL;
3024 				goto out;
3025 			}
3026 		}
3027 		move_to_confirmed(unconf);
3028 		conf = unconf;
3029 	}
3030 	get_client_locked(conf);
3031 	spin_unlock(&nn->client_lock);
3032 	nfsd4_probe_callback(conf);
3033 	spin_lock(&nn->client_lock);
3034 	put_client_renew_locked(conf);
3035 out:
3036 	spin_unlock(&nn->client_lock);
3037 	if (old)
3038 		expire_client(old);
3039 	return status;
3040 }
3041 
3042 static struct nfs4_file *nfsd4_alloc_file(void)
3043 {
3044 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
3045 }
3046 
3047 /* OPEN Share state helper functions */
3048 static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
3049 {
3050 	unsigned int hashval = file_hashval(fh);
3051 
3052 	lockdep_assert_held(&state_lock);
3053 
3054 	atomic_set(&fp->fi_ref, 1);
3055 	spin_lock_init(&fp->fi_lock);
3056 	INIT_LIST_HEAD(&fp->fi_stateids);
3057 	INIT_LIST_HEAD(&fp->fi_delegations);
3058 	fh_copy_shallow(&fp->fi_fhandle, fh);
3059 	fp->fi_had_conflict = false;
3060 	fp->fi_lease = NULL;
3061 	fp->fi_share_deny = 0;
3062 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3063 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
3064 	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
3065 }
3066 
3067 void
3068 nfsd4_free_slabs(void)
3069 {
3070 	kmem_cache_destroy(openowner_slab);
3071 	kmem_cache_destroy(lockowner_slab);
3072 	kmem_cache_destroy(file_slab);
3073 	kmem_cache_destroy(stateid_slab);
3074 	kmem_cache_destroy(deleg_slab);
3075 }
3076 
3077 int
3078 nfsd4_init_slabs(void)
3079 {
3080 	openowner_slab = kmem_cache_create("nfsd4_openowners",
3081 			sizeof(struct nfs4_openowner), 0, 0, NULL);
3082 	if (openowner_slab == NULL)
3083 		goto out;
3084 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3085 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
3086 	if (lockowner_slab == NULL)
3087 		goto out_free_openowner_slab;
3088 	file_slab = kmem_cache_create("nfsd4_files",
3089 			sizeof(struct nfs4_file), 0, 0, NULL);
3090 	if (file_slab == NULL)
3091 		goto out_free_lockowner_slab;
3092 	stateid_slab = kmem_cache_create("nfsd4_stateids",
3093 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3094 	if (stateid_slab == NULL)
3095 		goto out_free_file_slab;
3096 	deleg_slab = kmem_cache_create("nfsd4_delegations",
3097 			sizeof(struct nfs4_delegation), 0, 0, NULL);
3098 	if (deleg_slab == NULL)
3099 		goto out_free_stateid_slab;
3100 	return 0;
3101 
3102 out_free_stateid_slab:
3103 	kmem_cache_destroy(stateid_slab);
3104 out_free_file_slab:
3105 	kmem_cache_destroy(file_slab);
3106 out_free_lockowner_slab:
3107 	kmem_cache_destroy(lockowner_slab);
3108 out_free_openowner_slab:
3109 	kmem_cache_destroy(openowner_slab);
3110 out:
3111 	dprintk("nfsd4: out of memory while initializing nfsv4\n");
3112 	return -ENOMEM;
3113 }
3114 
3115 static void init_nfs4_replay(struct nfs4_replay *rp)
3116 {
3117 	rp->rp_status = nfserr_serverfault;
3118 	rp->rp_buflen = 0;
3119 	rp->rp_buf = rp->rp_ibuf;
3120 	mutex_init(&rp->rp_mutex);
3121 }
3122 
3123 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3124 		struct nfs4_stateowner *so)
3125 {
3126 	if (!nfsd4_has_session(cstate)) {
3127 		mutex_lock(&so->so_replay.rp_mutex);
3128 		cstate->replay_owner = so;
3129 		atomic_inc(&so->so_count);
3130 	}
3131 }
3132 
3133 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3134 {
3135 	struct nfs4_stateowner *so = cstate->replay_owner;
3136 
3137 	if (so != NULL) {
3138 		cstate->replay_owner = NULL;
3139 		mutex_unlock(&so->so_replay.rp_mutex);
3140 		nfs4_put_stateowner(so);
3141 	}
3142 }
3143 
3144 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3145 {
3146 	struct nfs4_stateowner *sop;
3147 
3148 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
3149 	if (!sop)
3150 		return NULL;
3151 
3152 	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3153 	if (!sop->so_owner.data) {
3154 		kmem_cache_free(slab, sop);
3155 		return NULL;
3156 	}
3157 	sop->so_owner.len = owner->len;
3158 
3159 	INIT_LIST_HEAD(&sop->so_stateids);
3160 	sop->so_client = clp;
3161 	init_nfs4_replay(&sop->so_replay);
3162 	atomic_set(&sop->so_count, 1);
3163 	return sop;
3164 }
3165 
3166 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3167 {
3168 	lockdep_assert_held(&clp->cl_lock);
3169 
3170 	list_add(&oo->oo_owner.so_strhash,
3171 		 &clp->cl_ownerstr_hashtbl[strhashval]);
3172 	list_add(&oo->oo_perclient, &clp->cl_openowners);
3173 }
3174 
3175 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3176 {
3177 	unhash_openowner_locked(openowner(so));
3178 }
3179 
3180 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3181 {
3182 	struct nfs4_openowner *oo = openowner(so);
3183 
3184 	kmem_cache_free(openowner_slab, oo);
3185 }
3186 
3187 static const struct nfs4_stateowner_operations openowner_ops = {
3188 	.so_unhash =	nfs4_unhash_openowner,
3189 	.so_free =	nfs4_free_openowner,
3190 };
3191 
3192 static struct nfs4_openowner *
3193 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3194 			   struct nfsd4_compound_state *cstate)
3195 {
3196 	struct nfs4_client *clp = cstate->clp;
3197 	struct nfs4_openowner *oo, *ret;
3198 
3199 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3200 	if (!oo)
3201 		return NULL;
3202 	oo->oo_owner.so_ops = &openowner_ops;
3203 	oo->oo_owner.so_is_open_owner = 1;
3204 	oo->oo_owner.so_seqid = open->op_seqid;
3205 	oo->oo_flags = 0;
3206 	if (nfsd4_has_session(cstate))
3207 		oo->oo_flags |= NFS4_OO_CONFIRMED;
3208 	oo->oo_time = 0;
3209 	oo->oo_last_closed_stid = NULL;
3210 	INIT_LIST_HEAD(&oo->oo_close_lru);
3211 	spin_lock(&clp->cl_lock);
3212 	ret = find_openstateowner_str_locked(strhashval, open, clp);
3213 	if (ret == NULL) {
3214 		hash_openowner(oo, clp, strhashval);
3215 		ret = oo;
3216 	} else
3217 		nfs4_free_openowner(&oo->oo_owner);
3218 	spin_unlock(&clp->cl_lock);
3219 	return ret;
3220 }
3221 
3222 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
3223 	struct nfs4_openowner *oo = open->op_openowner;
3224 
3225 	atomic_inc(&stp->st_stid.sc_count);
3226 	stp->st_stid.sc_type = NFS4_OPEN_STID;
3227 	INIT_LIST_HEAD(&stp->st_locks);
3228 	stp->st_stateowner = &oo->oo_owner;
3229 	atomic_inc(&stp->st_stateowner->so_count);
3230 	get_nfs4_file(fp);
3231 	stp->st_stid.sc_file = fp;
3232 	stp->st_access_bmap = 0;
3233 	stp->st_deny_bmap = 0;
3234 	stp->st_openstp = NULL;
3235 	spin_lock(&oo->oo_owner.so_client->cl_lock);
3236 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3237 	spin_lock(&fp->fi_lock);
3238 	list_add(&stp->st_perfile, &fp->fi_stateids);
3239 	spin_unlock(&fp->fi_lock);
3240 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
3241 }
3242 
3243 /*
3244  * In the 4.0 case we need to keep the owners around a little while to handle
3245  * CLOSE replay. We still do need to release any file access that is held by
3246  * them before returning however.
3247  */
3248 static void
3249 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3250 {
3251 	struct nfs4_ol_stateid *last;
3252 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
3253 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3254 						nfsd_net_id);
3255 
3256 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3257 
3258 	/*
3259 	 * We know that we hold one reference via nfsd4_close, and another
3260 	 * "persistent" reference for the client. If the refcount is higher
3261 	 * than 2, then there are still calls in progress that are using this
3262 	 * stateid. We can't put the sc_file reference until they are finished.
3263 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
3264 	 * there should be no danger of the refcount going back up again at
3265 	 * this point.
3266 	 */
3267 	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3268 
3269 	release_all_access(s);
3270 	if (s->st_stid.sc_file) {
3271 		put_nfs4_file(s->st_stid.sc_file);
3272 		s->st_stid.sc_file = NULL;
3273 	}
3274 
3275 	spin_lock(&nn->client_lock);
3276 	last = oo->oo_last_closed_stid;
3277 	oo->oo_last_closed_stid = s;
3278 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3279 	oo->oo_time = get_seconds();
3280 	spin_unlock(&nn->client_lock);
3281 	if (last)
3282 		nfs4_put_stid(&last->st_stid);
3283 }
3284 
3285 /* search file_hashtbl[] for file */
3286 static struct nfs4_file *
3287 find_file_locked(struct knfsd_fh *fh)
3288 {
3289 	unsigned int hashval = file_hashval(fh);
3290 	struct nfs4_file *fp;
3291 
3292 	lockdep_assert_held(&state_lock);
3293 
3294 	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
3295 		if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
3296 			get_nfs4_file(fp);
3297 			return fp;
3298 		}
3299 	}
3300 	return NULL;
3301 }
3302 
3303 static struct nfs4_file *
3304 find_file(struct knfsd_fh *fh)
3305 {
3306 	struct nfs4_file *fp;
3307 
3308 	spin_lock(&state_lock);
3309 	fp = find_file_locked(fh);
3310 	spin_unlock(&state_lock);
3311 	return fp;
3312 }
3313 
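/*
 * The caller passes in a preallocated struct nfs4_file so the hash insert
 * can be done without sleeping under state_lock; if a matching entry is
 * already hashed, the preallocated one is not consumed and remains the
 * caller's to dispose of.
 */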
3314 static struct nfs4_file *
3315 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3316 {
3317 	struct nfs4_file *fp;
3318 
3319 	spin_lock(&state_lock);
3320 	fp = find_file_locked(fh);
3321 	if (fp == NULL) {
3322 		nfsd4_init_file(new, fh);
3323 		fp = new;
3324 	}
3325 	spin_unlock(&state_lock);
3326 
3327 	return fp;
3328 }
3329 
3330 /*
3331  * Called to check deny when READ with all zero stateid or
3332  * WRITE with all zero or all one stateid
3333  */
3334 static __be32
3335 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3336 {
3337 	struct nfs4_file *fp;
3338 	__be32 ret = nfs_ok;
3339 
3340 	fp = find_file(&current_fh->fh_handle);
3341 	if (!fp)
3342 		return ret;
3343 	/* Check for conflicting share reservations */
3344 	spin_lock(&fp->fi_lock);
3345 	if (fp->fi_share_deny & deny_type)
3346 		ret = nfserr_locked;
3347 	spin_unlock(&fp->fi_lock);
3348 	put_nfs4_file(fp);
3349 	return ret;
3350 }
3351 
3352 void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
3353 {
3354 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3355 					  nfsd_net_id);
3356 
3357 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3358 
3359 	/*
3360 	 * We can't do this in nfsd_break_deleg_cb because it is
3361 	 * already holding inode->i_lock.
3362 	 *
3363 	 * If the dl_time != 0, then we know that it has already been
3364 	 * queued for a lease break. Don't queue it again.
3365 	 */
3366 	spin_lock(&state_lock);
3367 	if (dp->dl_time == 0) {
3368 		dp->dl_time = get_seconds();
3369 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3370 	}
3371 	spin_unlock(&state_lock);
3372 }
3373 
3374 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3375 {
3376 	/*
3377 	 * We're assuming the state code never drops its reference
3378 	 * without first removing the lease.  Since we're in this lease
3379 	 * callback (and since the lease code is serialized by the kernel
3380 	 * lock) we know the server hasn't removed the lease yet, we know
3381 	 * it's safe to take a reference.
3382 	 */
3383 	atomic_inc(&dp->dl_stid.sc_count);
3384 	nfsd4_cb_recall(dp);
3385 }
3386 
3387 /* Called from break_lease() with i_lock held. */
3388 static void nfsd_break_deleg_cb(struct file_lock *fl)
3389 {
3390 	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3391 	struct nfs4_delegation *dp;
3392 
3393 	if (!fp) {
3394 		WARN(1, "(%p)->fl_owner NULL\n", fl);
3395 		return;
3396 	}
3397 	if (fp->fi_had_conflict) {
3398 		WARN(1, "duplicate break on %p\n", fp);
3399 		return;
3400 	}
3401 	/*
3402 	 * We don't want the locks code to time out the lease for us;
3403 	 * we'll remove it ourselves if a delegation isn't returned
3404 	 * in time:
3405 	 */
3406 	fl->fl_break_time = 0;
3407 
3408 	spin_lock(&fp->fi_lock);
3409 	fp->fi_had_conflict = true;
3410 	/*
3411 	 * If there are no delegations on the list, then we can't count on this
3412 	 * lease ever being cleaned up. Set the fl_break_time to jiffies so that
3413 	 * time_out_leases will do it ASAP. The fact that fi_had_conflict is now
3414 	 * true should keep any new delegations from being hashed.
3415 	 */
3416 	if (list_empty(&fp->fi_delegations))
3417 		fl->fl_break_time = jiffies;
3418 	else
3419 		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3420 			nfsd_break_one_deleg(dp);
3421 	spin_unlock(&fp->fi_lock);
3422 }
3423 
3424 static
3425 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
3426 {
3427 	if (arg & F_UNLCK)
3428 		return lease_modify(onlist, arg);
3429 	else
3430 		return -EAGAIN;
3431 }
3432 
3433 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3434 	.lm_break = nfsd_break_deleg_cb,
3435 	.lm_change = nfsd_change_deleg_cb,
3436 };
3437 
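/*
 * NFSv4.0 open-owner/lock-owner seqid check: a seqid exactly one less than
 * the owner's current value is a retransmission of the previous request and
 * is answered from the owner's replay cache.  Sessions (4.1+) have their
 * own replay detection, so the check is skipped there.
 */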
3438 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3439 {
3440 	if (nfsd4_has_session(cstate))
3441 		return nfs_ok;
3442 	if (seqid == so->so_seqid - 1)
3443 		return nfserr_replay_me;
3444 	if (seqid == so->so_seqid)
3445 		return nfs_ok;
3446 	return nfserr_bad_seqid;
3447 }
3448 
3449 static __be32 lookup_clientid(clientid_t *clid,
3450 		struct nfsd4_compound_state *cstate,
3451 		struct nfsd_net *nn)
3452 {
3453 	struct nfs4_client *found;
3454 
3455 	if (cstate->clp) {
3456 		found = cstate->clp;
3457 		if (!same_clid(&found->cl_clientid, clid))
3458 			return nfserr_stale_clientid;
3459 		return nfs_ok;
3460 	}
3461 
3462 	if (STALE_CLIENTID(clid, nn))
3463 		return nfserr_stale_clientid;
3464 
3465 	/*
3466 	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3467 	 * cached already then we know this is for v4.0 and "sessions"
3468 	 * will be false.
3469 	 */
3470 	WARN_ON_ONCE(cstate->session);
3471 	spin_lock(&nn->client_lock);
3472 	found = find_confirmed_client(clid, false, nn);
3473 	if (!found) {
3474 		spin_unlock(&nn->client_lock);
3475 		return nfserr_expired;
3476 	}
3477 	atomic_inc(&found->cl_refcount);
3478 	spin_unlock(&nn->client_lock);
3479 
3480 	/* Cache the nfs4_client in cstate! */
3481 	cstate->clp = found;
3482 	return nfs_ok;
3483 }
3484 
3485 __be32
3486 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3487 		    struct nfsd4_open *open, struct nfsd_net *nn)
3488 {
3489 	clientid_t *clientid = &open->op_clientid;
3490 	struct nfs4_client *clp = NULL;
3491 	unsigned int strhashval;
3492 	struct nfs4_openowner *oo = NULL;
3493 	__be32 status;
3494 
3495 	if (STALE_CLIENTID(&open->op_clientid, nn))
3496 		return nfserr_stale_clientid;
3497 	/*
3498 	 * In case we need it later, after we've already created the
3499 	 * file and don't want to risk a further failure:
3500 	 */
3501 	open->op_file = nfsd4_alloc_file();
3502 	if (open->op_file == NULL)
3503 		return nfserr_jukebox;
3504 
3505 	status = lookup_clientid(clientid, cstate, nn);
3506 	if (status)
3507 		return status;
3508 	clp = cstate->clp;
3509 
3510 	strhashval = ownerstr_hashval(&open->op_owner);
3511 	oo = find_openstateowner_str(strhashval, open, clp);
3512 	open->op_openowner = oo;
3513 	if (!oo) {
3514 		goto new_owner;
3515 	}
3516 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
3517 		/* Replace unconfirmed owners without checking for replay. */
3518 		release_openowner(oo);
3519 		open->op_openowner = NULL;
3520 		goto new_owner;
3521 	}
3522 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
3523 	if (status)
3524 		return status;
3525 	goto alloc_stateid;
3526 new_owner:
3527 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
3528 	if (oo == NULL)
3529 		return nfserr_jukebox;
3530 	open->op_openowner = oo;
3531 alloc_stateid:
3532 	open->op_stp = nfs4_alloc_open_stateid(clp);
3533 	if (!open->op_stp)
3534 		return nfserr_jukebox;
3535 	return nfs_ok;
3536 }
3537 
3538 static inline __be32
3539 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3540 {
3541 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3542 		return nfserr_openmode;
3543 	else
3544 		return nfs_ok;
3545 }
3546 
3547 static int share_access_to_flags(u32 share_access)
3548 {
3549 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3550 }
3551 
3552 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3553 {
3554 	struct nfs4_stid *ret;
3555 
3556 	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3557 	if (!ret)
3558 		return NULL;
3559 	return delegstateid(ret);
3560 }
3561 
3562 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3563 {
3564 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3565 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3566 }
3567 
3568 static __be32
3569 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3570 		struct nfs4_delegation **dp)
3571 {
3572 	int flags;
3573 	__be32 status = nfserr_bad_stateid;
3574 	struct nfs4_delegation *deleg;
3575 
3576 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
3577 	if (deleg == NULL)
3578 		goto out;
3579 	flags = share_access_to_flags(open->op_share_access);
3580 	status = nfs4_check_delegmode(deleg, flags);
3581 	if (status) {
3582 		nfs4_put_stid(&deleg->dl_stid);
3583 		goto out;
3584 	}
3585 	*dp = deleg;
3586 out:
3587 	if (!nfsd4_is_deleg_cur(open))
3588 		return nfs_ok;
3589 	if (status)
3590 		return status;
3591 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3592 	return nfs_ok;
3593 }
3594 
3595 static struct nfs4_ol_stateid *
3596 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3597 {
3598 	struct nfs4_ol_stateid *local, *ret = NULL;
3599 	struct nfs4_openowner *oo = open->op_openowner;
3600 
3601 	spin_lock(&fp->fi_lock);
3602 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3603 		/* ignore lock owners */
3604 		if (local->st_stateowner->so_is_open_owner == 0)
3605 			continue;
3606 		if (local->st_stateowner == &oo->oo_owner) {
3607 			ret = local;
3608 			atomic_inc(&ret->st_stid.sc_count);
3609 			break;
3610 		}
3611 	}
3612 	spin_unlock(&fp->fi_lock);
3613 	return ret;
3614 }
3615 
3616 static inline int nfs4_access_to_access(u32 nfs4_access)
3617 {
3618 	int flags = 0;
3619 
3620 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3621 		flags |= NFSD_MAY_READ;
3622 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3623 		flags |= NFSD_MAY_WRITE;
3624 	return flags;
3625 }
3626 
3627 static inline __be32
3628 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3629 		struct nfsd4_open *open)
3630 {
3631 	struct iattr iattr = {
3632 		.ia_valid = ATTR_SIZE,
3633 		.ia_size = 0,
3634 	};
3635 	if (!open->op_truncate)
3636 		return 0;
3637 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3638 		return nfserr_inval;
3639 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3640 }
3641 
3642 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3643 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
3644 		struct nfsd4_open *open)
3645 {
3646 	struct file *filp = NULL;
3647 	__be32 status;
3648 	int oflag = nfs4_access_to_omode(open->op_share_access);
3649 	int access = nfs4_access_to_access(open->op_share_access);
3650 	unsigned char old_access_bmap, old_deny_bmap;
3651 
3652 	spin_lock(&fp->fi_lock);
3653 
3654 	/*
3655 	 * Are we trying to set a deny mode that would conflict with
3656 	 * current access?
3657 	 */
3658 	status = nfs4_file_check_deny(fp, open->op_share_deny);
3659 	if (status != nfs_ok) {
3660 		spin_unlock(&fp->fi_lock);
3661 		goto out;
3662 	}
3663 
3664 	/* set access to the file */
3665 	status = nfs4_file_get_access(fp, open->op_share_access);
3666 	if (status != nfs_ok) {
3667 		spin_unlock(&fp->fi_lock);
3668 		goto out;
3669 	}
3670 
3671 	/* Set access bits in stateid */
3672 	old_access_bmap = stp->st_access_bmap;
3673 	set_access(open->op_share_access, stp);
3674 
3675 	/* Set new deny mask */
3676 	old_deny_bmap = stp->st_deny_bmap;
3677 	set_deny(open->op_share_deny, stp);
3678 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3679 
3680 	if (!fp->fi_fds[oflag]) {
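		/*
		 * nfsd_open() may sleep, so drop fi_lock around the call and
		 * recheck fi_fds afterwards in case another opener raced in
		 * and installed a struct file first.
		 */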
3681 		spin_unlock(&fp->fi_lock);
3682 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
3683 		if (status)
3684 			goto out_put_access;
3685 		spin_lock(&fp->fi_lock);
3686 		if (!fp->fi_fds[oflag]) {
3687 			fp->fi_fds[oflag] = filp;
3688 			filp = NULL;
3689 		}
3690 	}
3691 	spin_unlock(&fp->fi_lock);
3692 	if (filp)
3693 		fput(filp);
3694 
3695 	status = nfsd4_truncate(rqstp, cur_fh, open);
3696 	if (status)
3697 		goto out_put_access;
3698 out:
3699 	return status;
3700 out_put_access:
3701 	stp->st_access_bmap = old_access_bmap;
3702 	nfs4_file_put_access(fp, open->op_share_access);
3703 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
3704 	goto out;
3705 }
3706 
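/*
 * Editorial note: handle an OPEN against an existing open stateid.  If the
 * requested access is not yet held this is just another nfs4_get_vfs_file();
 * otherwise only the deny mode needs to be checked and recorded before the
 * optional truncate.
 */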
3707 static __be32
3708 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
3709 {
3710 	__be32 status;
3711 	unsigned char old_deny_bmap;
3712 
3713 	if (!test_access(open->op_share_access, stp))
3714 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
3715 
3716 	/* test and set deny mode */
3717 	spin_lock(&fp->fi_lock);
3718 	status = nfs4_file_check_deny(fp, open->op_share_deny);
3719 	if (status == nfs_ok) {
3720 		old_deny_bmap = stp->st_deny_bmap;
3721 		set_deny(open->op_share_deny, stp);
3722 		fp->fi_share_deny |=
3723 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3724 	}
3725 	spin_unlock(&fp->fi_lock);
3726 
3727 	if (status != nfs_ok)
3728 		return status;
3729 
3730 	status = nfsd4_truncate(rqstp, cur_fh, open);
3731 	if (status != nfs_ok)
3732 		reset_union_bmap_deny(old_deny_bmap, stp);
3733 	return status;
3734 }
3735 
3736 static void
3737 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
3738 {
3739 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3740 }
3741 
3742 /* Should we give out recallable state? */
3743 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3744 {
3745 	if (clp->cl_cb_state == NFSD4_CB_UP)
3746 		return true;
3747 	/*
3748 	 * In the sessions case, since we don't have to establish a
3749 	 * separate connection for callbacks, we assume it's OK
3750 	 * until we hear otherwise:
3751 	 */
3752 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3753 }
3754 
3755 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
3756 {
3757 	struct file_lock *fl;
3758 
3759 	fl = locks_alloc_lock();
3760 	if (!fl)
3761 		return NULL;
3762 	locks_init_lock(fl);
3763 	fl->fl_lmops = &nfsd_lease_mng_ops;
3764 	fl->fl_flags = FL_DELEG;
3765 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
3766 	fl->fl_end = OFFSET_MAX;
3767 	fl->fl_owner = (fl_owner_t)fp;
3768 	fl->fl_pid = current->tgid;
3769 	return fl;
3770 }
3771 
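/*
 * Editorial note: set an FL_DELEG read lease on the file's readable struct
 * file and, provided the lease was not broken in the meantime, hash the
 * delegation onto the file.  If another delegation installed a lease first,
 * the existing one is shared by bumping fi_delegees.
 */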
3772 static int nfs4_setlease(struct nfs4_delegation *dp)
3773 {
3774 	struct nfs4_file *fp = dp->dl_stid.sc_file;
3775 	struct file_lock *fl;
3776 	struct file *filp;
3777 	int status = 0;
3778 
3779 	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
3780 	if (!fl)
3781 		return -ENOMEM;
3782 	filp = find_readable_file(fp);
3783 	if (!filp) {
3784 		/* We should always have a readable file here */
3785 		WARN_ON_ONCE(1);
3786 		return -EBADF;
3787 	}
3788 	fl->fl_file = filp;
3789 	status = vfs_setlease(filp, fl->fl_type, &fl);
3790 	if (status) {
3791 		locks_free_lock(fl);
3792 		goto out_fput;
3793 	}
3794 	spin_lock(&state_lock);
3795 	spin_lock(&fp->fi_lock);
3796 	/* Did the lease get broken before we took the lock? */
3797 	status = -EAGAIN;
3798 	if (fp->fi_had_conflict)
3799 		goto out_unlock;
3800 	/* Race breaker */
3801 	if (fp->fi_lease) {
3802 		status = 0;
3803 		atomic_inc(&fp->fi_delegees);
3804 		hash_delegation_locked(dp, fp);
3805 		goto out_unlock;
3806 	}
3807 	fp->fi_lease = fl;
3808 	fp->fi_deleg_file = filp;
3809 	atomic_set(&fp->fi_delegees, 1);
3810 	hash_delegation_locked(dp, fp);
3811 	spin_unlock(&fp->fi_lock);
3812 	spin_unlock(&state_lock);
3813 	return 0;
3814 out_unlock:
3815 	spin_unlock(&fp->fi_lock);
3816 	spin_unlock(&state_lock);
3817 out_fput:
3818 	fput(filp);
3819 	return status;
3820 }
3821 
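/*
 * Editorial note: allocate a delegation for this client/file pair.  If the
 * file has no lease yet, set one up via nfs4_setlease(); otherwise take
 * another reference on the existing lease and hash the new delegation.
 * Returns an ERR_PTR (-EAGAIN on conflict, -ENOMEM on allocation failure).
 */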
3822 static struct nfs4_delegation *
3823 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3824 		    struct nfs4_file *fp)
3825 {
3826 	int status;
3827 	struct nfs4_delegation *dp;
3828 
3829 	if (fp->fi_had_conflict)
3830 		return ERR_PTR(-EAGAIN);
3831 
3832 	dp = alloc_init_deleg(clp, fh);
3833 	if (!dp)
3834 		return ERR_PTR(-ENOMEM);
3835 
3836 	get_nfs4_file(fp);
3837 	spin_lock(&state_lock);
3838 	spin_lock(&fp->fi_lock);
3839 	dp->dl_stid.sc_file = fp;
3840 	if (!fp->fi_lease) {
3841 		spin_unlock(&fp->fi_lock);
3842 		spin_unlock(&state_lock);
3843 		status = nfs4_setlease(dp);
3844 		goto out;
3845 	}
3846 	atomic_inc(&fp->fi_delegees);
3847 	if (fp->fi_had_conflict) {
3848 		status = -EAGAIN;
3849 		goto out_unlock;
3850 	}
3851 	hash_delegation_locked(dp, fp);
3852 	status = 0;
3853 out_unlock:
3854 	spin_unlock(&fp->fi_lock);
3855 	spin_unlock(&state_lock);
3856 out:
3857 	if (status) {
3858 		nfs4_put_stid(&dp->dl_stid);
3859 		return ERR_PTR(status);
3860 	}
3861 	return dp;
3862 }
3863 
3864 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3865 {
3866 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3867 	if (status == -EAGAIN)
3868 		open->op_why_no_deleg = WND4_CONTENTION;
3869 	else {
3870 		open->op_why_no_deleg = WND4_RESOURCE;
3871 		switch (open->op_deleg_want) {
3872 		case NFS4_SHARE_WANT_READ_DELEG:
3873 		case NFS4_SHARE_WANT_WRITE_DELEG:
3874 		case NFS4_SHARE_WANT_ANY_DELEG:
3875 			break;
3876 		case NFS4_SHARE_WANT_CANCEL:
3877 			open->op_why_no_deleg = WND4_CANCELLED;
3878 			break;
3879 		case NFS4_SHARE_WANT_NO_DELEG:
3880 			WARN_ON_ONCE(1);
3881 		}
3882 	}
3883 }
3884 
3885 /*
3886  * Attempt to hand out a delegation.
3887  *
3888  * Note we don't support write delegations, and won't until the vfs has
3889  * proper support for them.
3890  */
3891 static void
3892 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
3893 			struct nfs4_ol_stateid *stp)
3894 {
3895 	struct nfs4_delegation *dp;
3896 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
3897 	struct nfs4_client *clp = stp->st_stid.sc_client;
3898 	int cb_up;
3899 	int status = 0;
3900 
3901 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3902 	open->op_recall = 0;
3903 	switch (open->op_claim_type) {
3904 		case NFS4_OPEN_CLAIM_PREVIOUS:
3905 			if (!cb_up)
3906 				open->op_recall = 1;
3907 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
3908 				goto out_no_deleg;
3909 			break;
3910 		case NFS4_OPEN_CLAIM_NULL:
3911 		case NFS4_OPEN_CLAIM_FH:
3912 			/*
3913 			 * Let's not give out any delegations till everyone's
3914 			 * had the chance to reclaim theirs....
3915 			 */
3916 			if (locks_in_grace(clp->net))
3917 				goto out_no_deleg;
3918 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3919 				goto out_no_deleg;
3920 			/*
3921 			 * Also, if the file was opened for write or
3922 			 * create, there's a good chance the client's
3923 			 * about to write to it, resulting in an
3924 			 * immediate recall (since we don't support
3925 			 * write delegations):
3926 			 */
3927 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3928 				goto out_no_deleg;
3929 			if (open->op_create == NFS4_OPEN_CREATE)
3930 				goto out_no_deleg;
3931 			break;
3932 		default:
3933 			goto out_no_deleg;
3934 	}
3935 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
3936 	if (IS_ERR(dp))
3937 		goto out_no_deleg;
3938 
3939 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3940 
3941 	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3942 		STATEID_VAL(&dp->dl_stid.sc_stateid));
3943 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3944 	nfs4_put_stid(&dp->dl_stid);
3945 	return;
3946 out_no_deleg:
3947 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
3948 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3949 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
3950 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3951 		open->op_recall = 1;
3952 	}
3953 
3954 	/* 4.1 client asking for a delegation? */
3955 	if (open->op_deleg_want)
3956 		nfsd4_open_deleg_none_ext(open, status);
3957 	return;
3958 }
3959 
3960 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
3961 					struct nfs4_delegation *dp)
3962 {
3963 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3964 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3965 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3966 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3967 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3968 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3969 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3970 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3971 	}
3972 	/* Otherwise the client must be confused, wanting a delegation
3973 	 * it already has; in that case we don't return
3974 	 * NFS4_OPEN_DELEGATE_NONE_EXT or a reason.
3975 	 */
3976 }
3977 
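/*
 * Editorial note: core of OPEN processing once the openowner is in hand:
 * find or create the nfs4_file for the current filehandle, validate any
 * CLAIM_DELEGATE_CUR stateid, upgrade an existing open stateid or initialize
 * a new one, then try to hand out a read delegation.  The OPEN itself
 * succeeds even when no delegation can be given out.
 */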
3978 __be32
3979 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3980 {
3981 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3982 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3983 	struct nfs4_file *fp = NULL;
3984 	struct nfs4_ol_stateid *stp = NULL;
3985 	struct nfs4_delegation *dp = NULL;
3986 	__be32 status;
3987 
3988 	/*
3989 	 * Look up the file; if found, look up a matching stateid, check the
3990 	 * open request, and check for delegations in the process of being
3991 	 * recalled.  If not found, create the nfs4_file struct.
3992 	 */
3993 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
3994 	if (fp != open->op_file) {
3995 		status = nfs4_check_deleg(cl, open, &dp);
3996 		if (status)
3997 			goto out;
3998 		stp = nfsd4_find_existing_open(fp, open);
3999 	} else {
4000 		open->op_file = NULL;
4001 		status = nfserr_bad_stateid;
4002 		if (nfsd4_is_deleg_cur(open))
4003 			goto out;
4004 		status = nfserr_jukebox;
4005 	}
4006 
4007 	/*
4008 	 * OPEN the file, or upgrade an existing OPEN.
4009 	 * If truncate fails, the OPEN fails.
4010 	 */
4011 	if (stp) {
4012 		/* Stateid was found, this is an OPEN upgrade */
4013 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4014 		if (status)
4015 			goto out;
4016 	} else {
4017 		stp = open->op_stp;
4018 		open->op_stp = NULL;
4019 		init_open_stateid(stp, fp, open);
4020 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4021 		if (status) {
4022 			release_open_stateid(stp);
4023 			goto out;
4024 		}
4025 	}
4026 	update_stateid(&stp->st_stid.sc_stateid);
4027 	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4028 
4029 	if (nfsd4_has_session(&resp->cstate)) {
4030 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4031 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4032 			open->op_why_no_deleg = WND4_NOT_WANTED;
4033 			goto nodeleg;
4034 		}
4035 	}
4036 
4037 	/*
4038 	 * Attempt to hand out a delegation. No error return, because the
4039 	 * OPEN succeeds even if we fail.
4040 	 */
4041 	nfs4_open_delegation(current_fh, open, stp);
4042 nodeleg:
4043 	status = nfs_ok;
4044 
4045 	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4046 		STATEID_VAL(&stp->st_stid.sc_stateid));
4047 out:
4048 	/* 4.1 client trying to upgrade/downgrade delegation? */
4049 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4050 	    open->op_deleg_want)
4051 		nfsd4_deleg_xgrade_none_ext(open, dp);
4052 
4053 	if (fp)
4054 		put_nfs4_file(fp);
4055 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4056 		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
4057 	/*
4058 	 * To finish the open response, we just need to set the rflags.
4059 	 */
4060 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4061 	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
4062 	    !nfsd4_has_session(&resp->cstate))
4063 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4064 	if (dp)
4065 		nfs4_put_stid(&dp->dl_stid);
4066 	if (stp)
4067 		nfs4_put_stid(&stp->st_stid);
4068 
4069 	return status;
4070 }
4071 
4072 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4073 			      struct nfsd4_open *open, __be32 status)
4074 {
4075 	if (open->op_openowner) {
4076 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4077 
4078 		nfsd4_cstate_assign_replay(cstate, so);
4079 		nfs4_put_stateowner(so);
4080 	}
4081 	if (open->op_file)
4082 		nfsd4_free_file(open->op_file);
4083 	if (open->op_stp)
4084 		nfs4_put_stid(&open->op_stp->st_stid);
4085 }
4086 
4087 __be32
4088 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4089 	    clientid_t *clid)
4090 {
4091 	struct nfs4_client *clp;
4092 	__be32 status;
4093 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4094 
4095 	dprintk("process_renew(%08x/%08x): starting\n",
4096 			clid->cl_boot, clid->cl_id);
4097 	status = lookup_clientid(clid, cstate, nn);
4098 	if (status)
4099 		goto out;
4100 	clp = cstate->clp;
4101 	status = nfserr_cb_path_down;
4102 	if (!list_empty(&clp->cl_delegations)
4103 			&& clp->cl_cb_state != NFSD4_CB_UP)
4104 		goto out;
4105 	status = nfs_ok;
4106 out:
4107 	return status;
4108 }
4109 
4110 static void
4111 nfsd4_end_grace(struct nfsd_net *nn)
4112 {
4113 	/* do nothing if grace period already ended */
4114 	if (nn->grace_ended)
4115 		return;
4116 
4117 	dprintk("NFSD: end of grace period\n");
4118 	nn->grace_ended = true;
4119 	nfsd4_record_grace_done(nn, nn->boot_time);
4120 	locks_end_grace(&nn->nfsd4_manager);
4121 	/*
4122 	 * Now that every NFSv4 client has had the chance to recover and
4123 	 * to see the (possibly new, possibly shorter) lease time, we
4124 	 * can safely set the next grace time to the current lease time:
4125 	 */
4126 	nn->nfsd4_grace = nn->nfsd4_lease;
4127 }
4128 
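/*
 * Editorial note: the laundromat sweep ends the grace period, expires
 * clients whose leases have run out, revokes delegations that were recalled
 * but never returned in time, and frees openowners' aged-out last-closed
 * stateids.  Returns the number of seconds until the next sweep is due,
 * never less than NFSD_LAUNDROMAT_MINTIMEOUT.
 */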
4129 static time_t
4130 nfs4_laundromat(struct nfsd_net *nn)
4131 {
4132 	struct nfs4_client *clp;
4133 	struct nfs4_openowner *oo;
4134 	struct nfs4_delegation *dp;
4135 	struct nfs4_ol_stateid *stp;
4136 	struct list_head *pos, *next, reaplist;
4137 	time_t cutoff = get_seconds() - nn->nfsd4_lease;
4138 	time_t t, new_timeo = nn->nfsd4_lease;
4139 
4140 	dprintk("NFSD: laundromat service - starting\n");
4141 	nfsd4_end_grace(nn);
4142 	INIT_LIST_HEAD(&reaplist);
4143 	spin_lock(&nn->client_lock);
4144 	list_for_each_safe(pos, next, &nn->client_lru) {
4145 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4146 		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4147 			t = clp->cl_time - cutoff;
4148 			new_timeo = min(new_timeo, t);
4149 			break;
4150 		}
4151 		if (mark_client_expired_locked(clp)) {
4152 			dprintk("NFSD: client in use (clientid %08x)\n",
4153 				clp->cl_clientid.cl_id);
4154 			continue;
4155 		}
4156 		list_add(&clp->cl_lru, &reaplist);
4157 	}
4158 	spin_unlock(&nn->client_lock);
4159 	list_for_each_safe(pos, next, &reaplist) {
4160 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4161 		dprintk("NFSD: purging unused client (clientid %08x)\n",
4162 			clp->cl_clientid.cl_id);
4163 		list_del_init(&clp->cl_lru);
4164 		expire_client(clp);
4165 	}
4166 	spin_lock(&state_lock);
4167 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
4168 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4169 		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
4170 			continue;
4171 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4172 			t = dp->dl_time - cutoff;
4173 			new_timeo = min(new_timeo, t);
4174 			break;
4175 		}
4176 		unhash_delegation_locked(dp);
4177 		list_add(&dp->dl_recall_lru, &reaplist);
4178 	}
4179 	spin_unlock(&state_lock);
4180 	while (!list_empty(&reaplist)) {
4181 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
4182 					dl_recall_lru);
4183 		list_del_init(&dp->dl_recall_lru);
4184 		revoke_delegation(dp);
4185 	}
4186 
4187 	spin_lock(&nn->client_lock);
4188 	while (!list_empty(&nn->close_lru)) {
4189 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4190 					oo_close_lru);
4191 		if (time_after((unsigned long)oo->oo_time,
4192 			       (unsigned long)cutoff)) {
4193 			t = oo->oo_time - cutoff;
4194 			new_timeo = min(new_timeo, t);
4195 			break;
4196 		}
4197 		list_del_init(&oo->oo_close_lru);
4198 		stp = oo->oo_last_closed_stid;
4199 		oo->oo_last_closed_stid = NULL;
4200 		spin_unlock(&nn->client_lock);
4201 		nfs4_put_stid(&stp->st_stid);
4202 		spin_lock(&nn->client_lock);
4203 	}
4204 	spin_unlock(&nn->client_lock);
4205 
4206 	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4207 	return new_timeo;
4208 }
4209 
4210 static struct workqueue_struct *laundry_wq;
4211 static void laundromat_main(struct work_struct *);
4212 
4213 static void
4214 laundromat_main(struct work_struct *laundry)
4215 {
4216 	time_t t;
4217 	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
4218 						  work);
4219 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4220 					   laundromat_work);
4221 
4222 	t = nfs4_laundromat(nn);
4223 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4224 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4225 }
4226 
4227 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
4228 {
4229 	if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
4230 		return nfserr_bad_stateid;
4231 	return nfs_ok;
4232 }
4233 
4234 static inline int
4235 access_permit_read(struct nfs4_ol_stateid *stp)
4236 {
4237 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4238 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4239 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4240 }
4241 
4242 static inline int
4243 access_permit_write(struct nfs4_ol_stateid *stp)
4244 {
4245 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4246 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4247 }
4248 
4249 static
4250 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4251 {
4252         __be32 status = nfserr_openmode;
4253 
4254 	/* For lock stateids, we test the parent open, not the lock: */
4255 	if (stp->st_openstp)
4256 		stp = stp->st_openstp;
4257 	if ((flags & WR_STATE) && !access_permit_write(stp))
4258                 goto out;
4259 	if ((flags & RD_STATE) && !access_permit_read(stp))
4260                 goto out;
4261 	status = nfs_ok;
4262 out:
4263 	return status;
4264 }
4265 
4266 static inline __be32
4267 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4268 {
4269 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
4270 		return nfs_ok;
4271 	else if (locks_in_grace(net)) {
4272 		/* The answer in the remaining cases depends on the existence of
4273 		 * conflicting state, so we must wait out the grace period. */
4274 		return nfserr_grace;
4275 	} else if (flags & WR_STATE)
4276 		return nfs4_share_conflict(current_fh,
4277 				NFS4_SHARE_DENY_WRITE);
4278 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4279 		return nfs4_share_conflict(current_fh,
4280 				NFS4_SHARE_DENY_READ);
4281 }
4282 
4283 /*
4284  * Allow READ/WRITE during grace period on recovered state only for files
4285  * that are not able to provide mandatory locking.
4286  */
4287 static inline int
4288 grace_disallows_io(struct net *net, struct inode *inode)
4289 {
4290 	return locks_in_grace(net) && mandatory_lock(inode);
4291 }
4292 
4293 /* Returns true iff a is later than b: */
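/*
 * Editorial note: the signed difference also handles generation wraparound,
 * e.g. (s32)(2 - 0xffffffff) == 3 > 0, so generation 2 counts as later than
 * 0xffffffff.
 */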
4294 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
4295 {
4296 	return (s32)(a->si_generation - b->si_generation) > 0;
4297 }
4298 
4299 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4300 {
4301 	/*
4302 	 * When sessions are used the stateid generation number is ignored
4303 	 * when it is zero.
4304 	 */
4305 	if (has_session && in->si_generation == 0)
4306 		return nfs_ok;
4307 
4308 	if (in->si_generation == ref->si_generation)
4309 		return nfs_ok;
4310 
4311 	/* If the client sends us a stateid from the future, it's buggy: */
4312 	if (stateid_generation_after(in, ref))
4313 		return nfserr_bad_stateid;
4314 	/*
4315 	 * However, we could see a stateid from the past, even from a
4316 	 * non-buggy client.  For example, if the client sends a lock
4317 	 * while some IO is outstanding, the lock may bump si_generation
4318 	 * while the IO is still in flight.  The client could avoid that
4319 	 * situation by waiting for responses on all the IO requests,
4320 	 * but better performance may result in retrying IO that
4321 	 * receives an old_stateid error if requests are rarely
4322 	 * reordered in flight:
4323 	 */
4324 	return nfserr_old_stateid;
4325 }
4326 
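/*
 * Editorial note: check a single stateid for TEST_STATEID: look it up under
 * the client lock, verify its generation, and map its type to the
 * appropriate status code.
 */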
4327 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4328 {
4329 	struct nfs4_stid *s;
4330 	struct nfs4_ol_stateid *ols;
4331 	__be32 status = nfserr_bad_stateid;
4332 
4333 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4334 		return status;
4335 	/* Client debugging aid. */
4336 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4337 		char addr_str[INET6_ADDRSTRLEN];
4338 		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4339 				 sizeof(addr_str));
4340 		pr_warn_ratelimited("NFSD: client %s testing state ID "
4341 					"with incorrect client ID\n", addr_str);
4342 		return status;
4343 	}
4344 	spin_lock(&cl->cl_lock);
4345 	s = find_stateid_locked(cl, stateid);
4346 	if (!s)
4347 		goto out_unlock;
4348 	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
4349 	if (status)
4350 		goto out_unlock;
4351 	switch (s->sc_type) {
4352 	case NFS4_DELEG_STID:
4353 		status = nfs_ok;
4354 		break;
4355 	case NFS4_REVOKED_DELEG_STID:
4356 		status = nfserr_deleg_revoked;
4357 		break;
4358 	case NFS4_OPEN_STID:
4359 	case NFS4_LOCK_STID:
4360 		ols = openlockstateid(s);
4361 		if (ols->st_stateowner->so_is_open_owner
4362 	    			&& !(openowner(ols->st_stateowner)->oo_flags
4363 						& NFS4_OO_CONFIRMED))
4364 			status = nfserr_bad_stateid;
4365 		else
4366 			status = nfs_ok;
4367 		break;
4368 	default:
4369 		printk("unknown stateid type %x\n", s->sc_type);
4370 		/* Fallthrough */
4371 	case NFS4_CLOSED_STID:
4372 	case NFS4_CLOSED_DELEG_STID:
4373 		status = nfserr_bad_stateid;
4374 	}
4375 out_unlock:
4376 	spin_unlock(&cl->cl_lock);
4377 	return status;
4378 }
4379 
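/*
 * Editorial note: resolve a stateid to a referenced nfs4_stid of one of the
 * types in typemask.  The special all-zeros and all-ones stateids are
 * rejected, and a stale clientid maps to a bad or stale stateid depending on
 * whether a session is in use.
 */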
4380 static __be32
4381 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4382 		     stateid_t *stateid, unsigned char typemask,
4383 		     struct nfs4_stid **s, struct nfsd_net *nn)
4384 {
4385 	__be32 status;
4386 
4387 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4388 		return nfserr_bad_stateid;
4389 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4390 	if (status == nfserr_stale_clientid) {
4391 		if (cstate->session)
4392 			return nfserr_bad_stateid;
4393 		return nfserr_stale_stateid;
4394 	}
4395 	if (status)
4396 		return status;
4397 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
4398 	if (!*s)
4399 		return nfserr_bad_stateid;
4400 	return nfs_ok;
4401 }
4402 
4403 /*
4404  * Checks for stateid operations
4405  */
4406 __be32
4407 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
4408 			   stateid_t *stateid, int flags, struct file **filpp)
4409 {
4410 	struct nfs4_stid *s;
4411 	struct nfs4_ol_stateid *stp = NULL;
4412 	struct nfs4_delegation *dp = NULL;
4413 	struct svc_fh *current_fh = &cstate->current_fh;
4414 	struct inode *ino = current_fh->fh_dentry->d_inode;
4415 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4416 	struct file *file = NULL;
4417 	__be32 status;
4418 
4419 	if (filpp)
4420 		*filpp = NULL;
4421 
4422 	if (grace_disallows_io(net, ino))
4423 		return nfserr_grace;
4424 
4425 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4426 		return check_special_stateids(net, current_fh, stateid, flags);
4427 
4428 	status = nfsd4_lookup_stateid(cstate, stateid,
4429 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
4430 				&s, nn);
4431 	if (status)
4432 		return status;
4433 	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
4434 	if (status)
4435 		goto out;
4436 	switch (s->sc_type) {
4437 	case NFS4_DELEG_STID:
4438 		dp = delegstateid(s);
4439 		status = nfs4_check_delegmode(dp, flags);
4440 		if (status)
4441 			goto out;
4442 		if (filpp) {
4443 			file = dp->dl_stid.sc_file->fi_deleg_file;
4444 			if (!file) {
4445 				WARN_ON_ONCE(1);
4446 				status = nfserr_serverfault;
4447 				goto out;
4448 			}
4449 			get_file(file);
4450 		}
4451 		break;
4452 	case NFS4_OPEN_STID:
4453 	case NFS4_LOCK_STID:
4454 		stp = openlockstateid(s);
4455 		status = nfs4_check_fh(current_fh, stp);
4456 		if (status)
4457 			goto out;
4458 		if (stp->st_stateowner->so_is_open_owner
4459 		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4460 			goto out;
4461 		status = nfs4_check_openmode(stp, flags);
4462 		if (status)
4463 			goto out;
4464 		if (filpp) {
4465 			struct nfs4_file *fp = stp->st_stid.sc_file;
4466 
4467 			if (flags & RD_STATE)
4468 				file = find_readable_file(fp);
4469 			else
4470 				file = find_writeable_file(fp);
4471 		}
4472 		break;
4473 	default:
4474 		status = nfserr_bad_stateid;
4475 		goto out;
4476 	}
4477 	status = nfs_ok;
4478 	if (file)
4479 		*filpp = file;
4480 out:
4481 	nfs4_put_stid(s);
4482 	return status;
4483 }
4484 
4485 /*
4486  * Test if the stateid is valid
4487  */
4488 __be32
4489 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4490 		   struct nfsd4_test_stateid *test_stateid)
4491 {
4492 	struct nfsd4_test_stateid_id *stateid;
4493 	struct nfs4_client *cl = cstate->session->se_client;
4494 
4495 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4496 		stateid->ts_id_status =
4497 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4498 
4499 	return nfs_ok;
4500 }
4501 
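/*
 * Editorial note: FREE_STATEID can only free a revoked delegation stateid or
 * a lock stateid whose lockowner no longer holds any locks on the file; open
 * and ordinary delegation stateids return nfserr_locks_held.
 */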
4502 __be32
4503 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4504 		   struct nfsd4_free_stateid *free_stateid)
4505 {
4506 	stateid_t *stateid = &free_stateid->fr_stateid;
4507 	struct nfs4_stid *s;
4508 	struct nfs4_delegation *dp;
4509 	struct nfs4_ol_stateid *stp;
4510 	struct nfs4_client *cl = cstate->session->se_client;
4511 	__be32 ret = nfserr_bad_stateid;
4512 
4513 	spin_lock(&cl->cl_lock);
4514 	s = find_stateid_locked(cl, stateid);
4515 	if (!s)
4516 		goto out_unlock;
4517 	switch (s->sc_type) {
4518 	case NFS4_DELEG_STID:
4519 		ret = nfserr_locks_held;
4520 		break;
4521 	case NFS4_OPEN_STID:
4522 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4523 		if (ret)
4524 			break;
4525 		ret = nfserr_locks_held;
4526 		break;
4527 	case NFS4_LOCK_STID:
4528 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4529 		if (ret)
4530 			break;
4531 		stp = openlockstateid(s);
4532 		ret = nfserr_locks_held;
4533 		if (check_for_locks(stp->st_stid.sc_file,
4534 				    lockowner(stp->st_stateowner)))
4535 			break;
4536 		unhash_lock_stateid(stp);
4537 		spin_unlock(&cl->cl_lock);
4538 		nfs4_put_stid(s);
4539 		ret = nfs_ok;
4540 		goto out;
4541 	case NFS4_REVOKED_DELEG_STID:
4542 		dp = delegstateid(s);
4543 		list_del_init(&dp->dl_recall_lru);
4544 		spin_unlock(&cl->cl_lock);
4545 		nfs4_put_stid(s);
4546 		ret = nfs_ok;
4547 		goto out;
4548 	/* Default falls through and returns nfserr_bad_stateid */
4549 	}
4550 out_unlock:
4551 	spin_unlock(&cl->cl_lock);
4552 out:
4553 	return ret;
4554 }
4555 
4556 static inline int
4557 setlkflg(int type)
4558 {
4559 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
4560 		RD_STATE : WR_STATE;
4561 }
4562 
4563 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
4564 {
4565 	struct svc_fh *current_fh = &cstate->current_fh;
4566 	struct nfs4_stateowner *sop = stp->st_stateowner;
4567 	__be32 status;
4568 
4569 	status = nfsd4_check_seqid(cstate, sop, seqid);
4570 	if (status)
4571 		return status;
4572 	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4573 		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4574 		/*
4575 		 * "Closed" stateids exist *only* to return
4576 		 * nfserr_replay_me from the previous step, and
4577 		 * revoked delegations are kept only for free_stateid.
4578 		 */
4579 		return nfserr_bad_stateid;
4580 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4581 	if (status)
4582 		return status;
4583 	return nfs4_check_fh(current_fh, stp);
4584 }
4585 
4586 /*
4587  * Checks for sequence id mutating operations.
4588  */
4589 static __be32
4590 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4591 			 stateid_t *stateid, char typemask,
4592 			 struct nfs4_ol_stateid **stpp,
4593 			 struct nfsd_net *nn)
4594 {
4595 	__be32 status;
4596 	struct nfs4_stid *s;
4597 	struct nfs4_ol_stateid *stp = NULL;
4598 
4599 	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
4600 		seqid, STATEID_VAL(stateid));
4601 
4602 	*stpp = NULL;
4603 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
4604 	if (status)
4605 		return status;
4606 	stp = openlockstateid(s);
4607 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
4608 
4609 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
4610 	if (!status)
4611 		*stpp = stp;
4612 	else
4613 		nfs4_put_stid(&stp->st_stid);
4614 	return status;
4615 }
4616 
4617 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4618 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
4619 {
4620 	__be32 status;
4621 	struct nfs4_openowner *oo;
4622 	struct nfs4_ol_stateid *stp;
4623 
4624 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
4625 						NFS4_OPEN_STID, &stp, nn);
4626 	if (status)
4627 		return status;
4628 	oo = openowner(stp->st_stateowner);
4629 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4630 		nfs4_put_stid(&stp->st_stid);
4631 		return nfserr_bad_stateid;
4632 	}
4633 	*stpp = stp;
4634 	return nfs_ok;
4635 }
4636 
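/*
 * Editorial note: OPEN_CONFIRM marks the openowner confirmed, bumps the open
 * stateid, and creates the client's record on stable storage.
 */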
4637 __be32
4638 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4639 		   struct nfsd4_open_confirm *oc)
4640 {
4641 	__be32 status;
4642 	struct nfs4_openowner *oo;
4643 	struct nfs4_ol_stateid *stp;
4644 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4645 
4646 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
4647 			cstate->current_fh.fh_dentry);
4648 
4649 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
4650 	if (status)
4651 		return status;
4652 
4653 	status = nfs4_preprocess_seqid_op(cstate,
4654 					oc->oc_seqid, &oc->oc_req_stateid,
4655 					NFS4_OPEN_STID, &stp, nn);
4656 	if (status)
4657 		goto out;
4658 	oo = openowner(stp->st_stateowner);
4659 	status = nfserr_bad_stateid;
4660 	if (oo->oo_flags & NFS4_OO_CONFIRMED)
4661 		goto put_stateid;
4662 	oo->oo_flags |= NFS4_OO_CONFIRMED;
4663 	update_stateid(&stp->st_stid.sc_stateid);
4664 	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4665 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4666 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4667 
4668 	nfsd4_client_record_create(oo->oo_owner.so_client);
4669 	status = nfs_ok;
4670 put_stateid:
4671 	nfs4_put_stid(&stp->st_stid);
4672 out:
4673 	nfsd4_bump_seqid(cstate, status);
4674 	return status;
4675 }
4676 
4677 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
4678 {
4679 	if (!test_access(access, stp))
4680 		return;
4681 	nfs4_file_put_access(stp->st_stid.sc_file, access);
4682 	clear_access(access, stp);
4683 }
4684 
4685 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
4686 {
4687 	switch (to_access) {
4688 	case NFS4_SHARE_ACCESS_READ:
4689 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
4690 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4691 		break;
4692 	case NFS4_SHARE_ACCESS_WRITE:
4693 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
4694 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4695 		break;
4696 	case NFS4_SHARE_ACCESS_BOTH:
4697 		break;
4698 	default:
4699 		WARN_ON_ONCE(1);
4700 	}
4701 }
4702 
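/*
 * Editorial note: OPEN_DOWNGRADE requires the requested access and deny
 * modes to be subsets of what the stateid currently holds; access bits no
 * longer wanted are dropped and the deny bitmap is recalculated.
 */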
4703 __be32
4704 nfsd4_open_downgrade(struct svc_rqst *rqstp,
4705 		     struct nfsd4_compound_state *cstate,
4706 		     struct nfsd4_open_downgrade *od)
4707 {
4708 	__be32 status;
4709 	struct nfs4_ol_stateid *stp;
4710 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4711 
4712 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
4713 			cstate->current_fh.fh_dentry);
4714 
4715 	/* We don't yet support WANT bits: */
4716 	if (od->od_deleg_want)
4717 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
4718 			od->od_deleg_want);
4719 
4720 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
4721 					&od->od_stateid, &stp, nn);
4722 	if (status)
4723 		goto out;
4724 	status = nfserr_inval;
4725 	if (!test_access(od->od_share_access, stp)) {
4726 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
4727 			stp->st_access_bmap, od->od_share_access);
4728 		goto put_stateid;
4729 	}
4730 	if (!test_deny(od->od_share_deny, stp)) {
4731 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
4732 			stp->st_deny_bmap, od->od_share_deny);
4733 		goto put_stateid;
4734 	}
4735 	nfs4_stateid_downgrade(stp, od->od_share_access);
4736 
4737 	reset_union_bmap_deny(od->od_share_deny, stp);
4738 
4739 	update_stateid(&stp->st_stid.sc_stateid);
4740 	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4741 	status = nfs_ok;
4742 put_stateid:
4743 	nfs4_put_stid(&stp->st_stid);
4744 out:
4745 	nfsd4_bump_seqid(cstate, status);
4746 	return status;
4747 }
4748 
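/*
 * Editorial note: unhash a closed open stateid and its lock stateids.  For
 * NFSv4.1+ clients everything can be freed immediately; for v4.0 the stateid
 * is parked on the close_lru so a CLOSE replay can still find it until the
 * laundromat reaps it.
 */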
4749 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
4750 {
4751 	struct nfs4_client *clp = s->st_stid.sc_client;
4752 	LIST_HEAD(reaplist);
4753 
4754 	s->st_stid.sc_type = NFS4_CLOSED_STID;
4755 	spin_lock(&clp->cl_lock);
4756 	unhash_open_stateid(s, &reaplist);
4757 
4758 	if (clp->cl_minorversion) {
4759 		put_ol_stateid_locked(s, &reaplist);
4760 		spin_unlock(&clp->cl_lock);
4761 		free_ol_stateid_reaplist(&reaplist);
4762 	} else {
4763 		spin_unlock(&clp->cl_lock);
4764 		free_ol_stateid_reaplist(&reaplist);
4765 		move_to_close_lru(s, clp->net);
4766 	}
4767 }
4768 
4769 /*
4770  * nfs4_unlock_state() called after encode
4771  */
4772 __be32
4773 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4774 	    struct nfsd4_close *close)
4775 {
4776 	__be32 status;
4777 	struct nfs4_ol_stateid *stp;
4778 	struct net *net = SVC_NET(rqstp);
4779 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4780 
4781 	dprintk("NFSD: nfsd4_close on file %pd\n",
4782 			cstate->current_fh.fh_dentry);
4783 
4784 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
4785 					&close->cl_stateid,
4786 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
4787 					&stp, nn);
4788 	nfsd4_bump_seqid(cstate, status);
4789 	if (status)
4790 		goto out;
4791 	update_stateid(&stp->st_stid.sc_stateid);
4792 	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4793 
4794 	nfsd4_close_open_stateid(stp);
4795 
4796 	/* put reference from nfs4_preprocess_seqid_op */
4797 	nfs4_put_stid(&stp->st_stid);
4798 out:
4799 	return status;
4800 }
4801 
4802 __be32
4803 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4804 		  struct nfsd4_delegreturn *dr)
4805 {
4806 	struct nfs4_delegation *dp;
4807 	stateid_t *stateid = &dr->dr_stateid;
4808 	struct nfs4_stid *s;
4809 	__be32 status;
4810 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4811 
4812 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4813 		return status;
4814 
4815 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4816 	if (status)
4817 		goto out;
4818 	dp = delegstateid(s);
4819 	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4820 	if (status)
4821 		goto put_stateid;
4822 
4823 	destroy_delegation(dp);
4824 put_stateid:
4825 	nfs4_put_stid(&dp->dl_stid);
4826 out:
4827 	return status;
4828 }
4829 
4830 
4831 #define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
4832 
4833 static inline u64
4834 end_offset(u64 start, u64 len)
4835 {
4836 	u64 end;
4837 
4838 	end = start + len;
4839 	return end >= start ? end: NFS4_MAX_UINT64;
4840 }
4841 
4842 /* last octet in a range */
4843 static inline u64
4844 last_byte_offset(u64 start, u64 len)
4845 {
4846 	u64 end;
4847 
4848 	WARN_ON_ONCE(!len);
4849 	end = start + len;
4850 	return end > start ? end - 1: NFS4_MAX_UINT64;
4851 }
4852 
4853 /*
4854  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4855  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4856  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
4857  * locking, this prevents us from being completely protocol-compliant.  The
4858  * real solution to this problem is to start using unsigned file offsets in
4859  * the VFS, but this is a very deep change!
4860  */
4861 static inline void
4862 nfs4_transform_lock_offset(struct file_lock *lock)
4863 {
4864 	if (lock->fl_start < 0)
4865 		lock->fl_start = OFFSET_MAX;
4866 	if (lock->fl_end < 0)
4867 		lock->fl_end = OFFSET_MAX;
4868 }
4869 
4870 /* Hack!: For now, we're defining this just so we can use a pointer to it
4871  * as a unique cookie to identify our (NFSv4's) posix locks. */
4872 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
4873 };
4874 
4875 static inline void
4876 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
4877 {
4878 	struct nfs4_lockowner *lo;
4879 
4880 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4881 		lo = (struct nfs4_lockowner *) fl->fl_owner;
4882 		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
4883 					lo->lo_owner.so_owner.len, GFP_KERNEL);
4884 		if (!deny->ld_owner.data)
4885 			/* We just don't care that much */
4886 			goto nevermind;
4887 		deny->ld_owner.len = lo->lo_owner.so_owner.len;
4888 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4889 	} else {
4890 nevermind:
4891 		deny->ld_owner.len = 0;
4892 		deny->ld_owner.data = NULL;
4893 		deny->ld_clientid.cl_boot = 0;
4894 		deny->ld_clientid.cl_id = 0;
4895 	}
4896 	deny->ld_start = fl->fl_start;
4897 	deny->ld_length = NFS4_MAX_UINT64;
4898 	if (fl->fl_end != NFS4_MAX_UINT64)
4899 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
4900 	deny->ld_type = NFS4_READ_LT;
4901 	if (fl->fl_type != F_RDLCK)
4902 		deny->ld_type = NFS4_WRITE_LT;
4903 }
4904 
4905 static struct nfs4_lockowner *
4906 find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
4907 		struct nfs4_client *clp)
4908 {
4909 	unsigned int strhashval = ownerstr_hashval(owner);
4910 	struct nfs4_stateowner *so;
4911 
4912 	lockdep_assert_held(&clp->cl_lock);
4913 
4914 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
4915 			    so_strhash) {
4916 		if (so->so_is_open_owner)
4917 			continue;
4918 		if (!same_owner_str(so, owner))
4919 			continue;
4920 		atomic_inc(&so->so_count);
4921 		return lockowner(so);
4922 	}
4923 	return NULL;
4924 }
4925 
4926 static struct nfs4_lockowner *
4927 find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
4928 		struct nfs4_client *clp)
4929 {
4930 	struct nfs4_lockowner *lo;
4931 
4932 	spin_lock(&clp->cl_lock);
4933 	lo = find_lockowner_str_locked(clid, owner, clp);
4934 	spin_unlock(&clp->cl_lock);
4935 	return lo;
4936 }
4937 
4938 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
4939 {
4940 	unhash_lockowner_locked(lockowner(sop));
4941 }
4942 
4943 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
4944 {
4945 	struct nfs4_lockowner *lo = lockowner(sop);
4946 
4947 	kmem_cache_free(lockowner_slab, lo);
4948 }
4949 
4950 static const struct nfs4_stateowner_operations lockowner_ops = {
4951 	.so_unhash =	nfs4_unhash_lockowner,
4952 	.so_free =	nfs4_free_lockowner,
4953 };
4954 
4955 /*
4956  * Allocate a lockowner structure.
4957  * Called from nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
4958  * occurred.
4959  *
4960  * strhashval = ownerstr_hashval
4961  */
4962 static struct nfs4_lockowner *
4963 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
4964 			   struct nfs4_ol_stateid *open_stp,
4965 			   struct nfsd4_lock *lock)
4966 {
4967 	struct nfs4_lockowner *lo, *ret;
4968 
4969 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
4970 	if (!lo)
4971 		return NULL;
4972 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4973 	lo->lo_owner.so_is_open_owner = 0;
4974 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
4975 	lo->lo_owner.so_ops = &lockowner_ops;
4976 	spin_lock(&clp->cl_lock);
4977 	ret = find_lockowner_str_locked(&clp->cl_clientid,
4978 			&lock->lk_new_owner, clp);
4979 	if (ret == NULL) {
4980 		list_add(&lo->lo_owner.so_strhash,
4981 			 &clp->cl_ownerstr_hashtbl[strhashval]);
4982 		ret = lo;
4983 	} else
4984 		nfs4_free_lockowner(&lo->lo_owner);
4985 	spin_unlock(&clp->cl_lock);
4986 	return lo;
4987 }
4988 
4989 static void
4990 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
4991 		  struct nfs4_file *fp, struct inode *inode,
4992 		  struct nfs4_ol_stateid *open_stp)
4993 {
4994 	struct nfs4_client *clp = lo->lo_owner.so_client;
4995 
4996 	lockdep_assert_held(&clp->cl_lock);
4997 
4998 	atomic_inc(&stp->st_stid.sc_count);
4999 	stp->st_stid.sc_type = NFS4_LOCK_STID;
5000 	stp->st_stateowner = &lo->lo_owner;
5001 	atomic_inc(&lo->lo_owner.so_count);
5002 	get_nfs4_file(fp);
5003 	stp->st_stid.sc_file = fp;
5004 	stp->st_stid.sc_free = nfs4_free_lock_stateid;
5005 	stp->st_access_bmap = 0;
5006 	stp->st_deny_bmap = open_stp->st_deny_bmap;
5007 	stp->st_openstp = open_stp;
5008 	list_add(&stp->st_locks, &open_stp->st_locks);
5009 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5010 	spin_lock(&fp->fi_lock);
5011 	list_add(&stp->st_perfile, &fp->fi_stateids);
5012 	spin_unlock(&fp->fi_lock);
5013 }
5014 
5015 static struct nfs4_ol_stateid *
5016 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5017 {
5018 	struct nfs4_ol_stateid *lst;
5019 	struct nfs4_client *clp = lo->lo_owner.so_client;
5020 
5021 	lockdep_assert_held(&clp->cl_lock);
5022 
5023 	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5024 		if (lst->st_stid.sc_file == fp) {
5025 			atomic_inc(&lst->st_stid.sc_count);
5026 			return lst;
5027 		}
5028 	}
5029 	return NULL;
5030 }
5031 
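/*
 * Editorial note: find the lock stateid for this lockowner/file pair, or
 * create one.  The allocation is done outside cl_lock and then rechecked
 * under it, so a racing thread's stateid wins and the spare is released;
 * *new is set only when a fresh stateid was initialized here.
 */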
5032 static struct nfs4_ol_stateid *
5033 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5034 			    struct inode *inode, struct nfs4_ol_stateid *ost,
5035 			    bool *new)
5036 {
5037 	struct nfs4_stid *ns = NULL;
5038 	struct nfs4_ol_stateid *lst;
5039 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5040 	struct nfs4_client *clp = oo->oo_owner.so_client;
5041 
5042 	spin_lock(&clp->cl_lock);
5043 	lst = find_lock_stateid(lo, fi);
5044 	if (lst == NULL) {
5045 		spin_unlock(&clp->cl_lock);
5046 		ns = nfs4_alloc_stid(clp, stateid_slab);
5047 		if (ns == NULL)
5048 			return NULL;
5049 
5050 		spin_lock(&clp->cl_lock);
5051 		lst = find_lock_stateid(lo, fi);
5052 		if (likely(!lst)) {
5053 			lst = openlockstateid(ns);
5054 			init_lock_stateid(lst, lo, fi, inode, ost);
5055 			ns = NULL;
5056 			*new = true;
5057 		}
5058 	}
5059 	spin_unlock(&clp->cl_lock);
5060 	if (ns)
5061 		nfs4_put_stid(ns);
5062 	return lst;
5063 }
5064 
5065 static int
5066 check_lock_length(u64 offset, u64 length)
5067 {
5068 	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
5069 	     LOFF_OVERFLOW(offset, length)));
5070 }
5071 
5072 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5073 {
5074 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5075 
5076 	lockdep_assert_held(&fp->fi_lock);
5077 
5078 	if (test_access(access, lock_stp))
5079 		return;
5080 	__nfs4_file_get_access(fp, access);
5081 	set_access(access, lock_stp);
5082 }
5083 
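/*
 * Editorial note: for a lock request with a new lockowner, find or create
 * the lockowner (checking the lock seqid for v4.0 clients) and then find or
 * create the lock stateid for this open's file.
 */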
5084 static __be32
5085 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5086 			    struct nfs4_ol_stateid *ost,
5087 			    struct nfsd4_lock *lock,
5088 			    struct nfs4_ol_stateid **lst, bool *new)
5089 {
5090 	__be32 status;
5091 	struct nfs4_file *fi = ost->st_stid.sc_file;
5092 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5093 	struct nfs4_client *cl = oo->oo_owner.so_client;
5094 	struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
5095 	struct nfs4_lockowner *lo;
5096 	unsigned int strhashval;
5097 
5098 	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
5099 	if (!lo) {
5100 		strhashval = ownerstr_hashval(&lock->v.new.owner);
5101 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5102 		if (lo == NULL)
5103 			return nfserr_jukebox;
5104 	} else {
5105 		/* with an existing lockowner, seqids must be the same */
5106 		status = nfserr_bad_seqid;
5107 		if (!cstate->minorversion &&
5108 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5109 			goto out;
5110 	}
5111 
5112 	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5113 	if (*lst == NULL) {
5114 		status = nfserr_jukebox;
5115 		goto out;
5116 	}
5117 	status = nfs_ok;
5118 out:
5119 	nfs4_put_stateowner(&lo->lo_owner);
5120 	return status;
5121 }
5122 
5123 /*
5124  *  LOCK operation
5125  */
5126 __be32
5127 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5128 	   struct nfsd4_lock *lock)
5129 {
5130 	struct nfs4_openowner *open_sop = NULL;
5131 	struct nfs4_lockowner *lock_sop = NULL;
5132 	struct nfs4_ol_stateid *lock_stp = NULL;
5133 	struct nfs4_ol_stateid *open_stp = NULL;
5134 	struct nfs4_file *fp;
5135 	struct file *filp = NULL;
5136 	struct file_lock *file_lock = NULL;
5137 	struct file_lock *conflock = NULL;
5138 	__be32 status = 0;
5139 	int lkflg;
5140 	int err;
5141 	bool new = false;
5142 	struct net *net = SVC_NET(rqstp);
5143 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5144 
5145 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5146 		(long long) lock->lk_offset,
5147 		(long long) lock->lk_length);
5148 
5149 	if (check_lock_length(lock->lk_offset, lock->lk_length))
5150 		 return nfserr_inval;
5151 
5152 	if ((status = fh_verify(rqstp, &cstate->current_fh,
5153 				S_IFREG, NFSD_MAY_LOCK))) {
5154 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
5155 		return status;
5156 	}
5157 
5158 	if (lock->lk_is_new) {
5159 		if (nfsd4_has_session(cstate))
5160 			/* See rfc 5661 18.10.3: given clientid is ignored: */
5161 			memcpy(&lock->v.new.clientid,
5162 				&cstate->session->se_client->cl_clientid,
5163 				sizeof(clientid_t));
5164 
5165 		status = nfserr_stale_clientid;
5166 		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5167 			goto out;
5168 
5169 		/* validate and update open stateid and open seqid */
5170 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
5171 				        lock->lk_new_open_seqid,
5172 		                        &lock->lk_new_open_stateid,
5173 					&open_stp, nn);
5174 		if (status)
5175 			goto out;
5176 		open_sop = openowner(open_stp->st_stateowner);
5177 		status = nfserr_bad_stateid;
5178 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5179 						&lock->v.new.clientid))
5180 			goto out;
5181 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
5182 							&lock_stp, &new);
5183 	} else {
5184 		status = nfs4_preprocess_seqid_op(cstate,
5185 				       lock->lk_old_lock_seqid,
5186 				       &lock->lk_old_lock_stateid,
5187 				       NFS4_LOCK_STID, &lock_stp, nn);
5188 	}
5189 	if (status)
5190 		goto out;
5191 	lock_sop = lockowner(lock_stp->st_stateowner);
5192 
5193 	lkflg = setlkflg(lock->lk_type);
5194 	status = nfs4_check_openmode(lock_stp, lkflg);
5195 	if (status)
5196 		goto out;
5197 
5198 	status = nfserr_grace;
5199 	if (locks_in_grace(net) && !lock->lk_reclaim)
5200 		goto out;
5201 	status = nfserr_no_grace;
5202 	if (!locks_in_grace(net) && lock->lk_reclaim)
5203 		goto out;
5204 
5205 	file_lock = locks_alloc_lock();
5206 	if (!file_lock) {
5207 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5208 		status = nfserr_jukebox;
5209 		goto out;
5210 	}
5211 
5212 	fp = lock_stp->st_stid.sc_file;
5213 	locks_init_lock(file_lock);
5214 	switch (lock->lk_type) {
5215 		case NFS4_READ_LT:
5216 		case NFS4_READW_LT:
5217 			spin_lock(&fp->fi_lock);
5218 			filp = find_readable_file_locked(fp);
5219 			if (filp)
5220 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5221 			spin_unlock(&fp->fi_lock);
5222 			file_lock->fl_type = F_RDLCK;
5223 			break;
5224 		case NFS4_WRITE_LT:
5225 		case NFS4_WRITEW_LT:
5226 			spin_lock(&fp->fi_lock);
5227 			filp = find_writeable_file_locked(fp);
5228 			if (filp)
5229 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5230 			spin_unlock(&fp->fi_lock);
5231 			file_lock->fl_type = F_WRLCK;
5232 			break;
5233 		default:
5234 			status = nfserr_inval;
5235 		goto out;
5236 	}
5237 	if (!filp) {
5238 		status = nfserr_openmode;
5239 		goto out;
5240 	}
5241 	file_lock->fl_owner = (fl_owner_t)lock_sop;
5242 	file_lock->fl_pid = current->tgid;
5243 	file_lock->fl_file = filp;
5244 	file_lock->fl_flags = FL_POSIX;
5245 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
5246 	file_lock->fl_start = lock->lk_offset;
5247 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
5248 	nfs4_transform_lock_offset(file_lock);
5249 
5250 	conflock = locks_alloc_lock();
5251 	if (!conflock) {
5252 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5253 		status = nfserr_jukebox;
5254 		goto out;
5255 	}
5256 
5257 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
5258 	switch (-err) {
5259 	case 0: /* success! */
5260 		update_stateid(&lock_stp->st_stid.sc_stateid);
5261 		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
5262 				sizeof(stateid_t));
5263 		status = 0;
5264 		break;
5265 	case (EAGAIN):		/* conflock holds conflicting lock */
5266 		status = nfserr_denied;
5267 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5268 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
5269 		break;
5270 	case (EDEADLK):
5271 		status = nfserr_deadlock;
5272 		break;
5273 	default:
5274 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
5275 		status = nfserrno(err);
5276 		break;
5277 	}
5278 out:
5279 	if (filp)
5280 		fput(filp);
5281 	if (lock_stp) {
5282 		/* Bump seqid manually if the 4.0 replay owner is openowner */
5283 		if (cstate->replay_owner &&
5284 		    cstate->replay_owner != &lock_sop->lo_owner &&
5285 		    seqid_mutating_err(ntohl(status)))
5286 			lock_sop->lo_owner.so_seqid++;
5287 
5288 		/*
5289 		 * If this is a new, never-before-used stateid, and we are
5290 		 * returning an error, then just go ahead and release it.
5291 		 */
5292 		if (status && new)
5293 			release_lock_stateid(lock_stp);
5294 
5295 		nfs4_put_stid(&lock_stp->st_stid);
5296 	}
5297 	if (open_stp)
5298 		nfs4_put_stid(&open_stp->st_stid);
5299 	nfsd4_bump_seqid(cstate, status);
5300 	if (file_lock)
5301 		locks_free_lock(file_lock);
5302 	if (conflock)
5303 		locks_free_lock(conflock);
5304 	return status;
5305 }
5306 
5307 /*
5308  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
5309  * so we do a temporary open here just to get an open file to pass to
5310  * vfs_test_lock.  (Arguably, test_lock should be done with an
5311  * inode operation.)
5312  */
5313 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
5314 {
5315 	struct file *file;
5316 	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
5317 	if (!err) {
5318 		err = nfserrno(vfs_test_lock(file, lock));
5319 		nfsd_close(file);
5320 	}
5321 	return err;
5322 }
5323 
5324 /*
5325  * LOCKT operation
5326  */
5327 __be32
5328 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5329 	    struct nfsd4_lockt *lockt)
5330 {
5331 	struct file_lock *file_lock = NULL;
5332 	struct nfs4_lockowner *lo = NULL;
5333 	__be32 status;
5334 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5335 
5336 	if (locks_in_grace(SVC_NET(rqstp)))
5337 		return nfserr_grace;
5338 
5339 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
5340 		 return nfserr_inval;
5341 
5342 	if (!nfsd4_has_session(cstate)) {
5343 		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
5344 		if (status)
5345 			goto out;
5346 	}
5347 
5348 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5349 		goto out;
5350 
5351 	file_lock = locks_alloc_lock();
5352 	if (!file_lock) {
5353 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5354 		status = nfserr_jukebox;
5355 		goto out;
5356 	}
5357 	locks_init_lock(file_lock);
5358 	switch (lockt->lt_type) {
5359 		case NFS4_READ_LT:
5360 		case NFS4_READW_LT:
5361 			file_lock->fl_type = F_RDLCK;
5362 		break;
5363 		case NFS4_WRITE_LT:
5364 		case NFS4_WRITEW_LT:
5365 			file_lock->fl_type = F_WRLCK;
5366 		break;
5367 		default:
5368 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
5369 			status = nfserr_inval;
5370 		goto out;
5371 	}
5372 
5373 	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
5374 				cstate->clp);
5375 	if (lo)
5376 		file_lock->fl_owner = (fl_owner_t)lo;
5377 	file_lock->fl_pid = current->tgid;
5378 	file_lock->fl_flags = FL_POSIX;
5379 
5380 	file_lock->fl_start = lockt->lt_offset;
5381 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
5382 
5383 	nfs4_transform_lock_offset(file_lock);
5384 
5385 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
5386 	if (status)
5387 		goto out;
5388 
5389 	if (file_lock->fl_type != F_UNLCK) {
5390 		status = nfserr_denied;
5391 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
5392 	}
5393 out:
5394 	if (lo)
5395 		nfs4_put_stateowner(&lo->lo_owner);
5396 	if (file_lock)
5397 		locks_free_lock(file_lock);
5398 	return status;
5399 }
5400 
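/*
 * Editorial note: LOCKU performs the usual seqid/stateid checks and then
 * clears the requested range with an F_UNLCK vfs_lock_file() against the
 * file backing the lock stateid.
 */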
5401 __be32
5402 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5403 	    struct nfsd4_locku *locku)
5404 {
5405 	struct nfs4_ol_stateid *stp;
5406 	struct file *filp = NULL;
5407 	struct file_lock *file_lock = NULL;
5408 	__be32 status;
5409 	int err;
5410 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5411 
5412 	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
5413 		(long long) locku->lu_offset,
5414 		(long long) locku->lu_length);
5415 
5416 	if (check_lock_length(locku->lu_offset, locku->lu_length))
5417 		 return nfserr_inval;
5418 
5419 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
5420 					&locku->lu_stateid, NFS4_LOCK_STID,
5421 					&stp, nn);
5422 	if (status)
5423 		goto out;
5424 	filp = find_any_file(stp->st_stid.sc_file);
5425 	if (!filp) {
5426 		status = nfserr_lock_range;
5427 		goto put_stateid;
5428 	}
5429 	file_lock = locks_alloc_lock();
5430 	if (!file_lock) {
5431 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5432 		status = nfserr_jukebox;
5433 		goto fput;
5434 	}
5435 	locks_init_lock(file_lock);
5436 	file_lock->fl_type = F_UNLCK;
5437 	file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
5438 	file_lock->fl_pid = current->tgid;
5439 	file_lock->fl_file = filp;
5440 	file_lock->fl_flags = FL_POSIX;
5441 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
5442 	file_lock->fl_start = locku->lu_offset;
5443 
5444 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
5445 						locku->lu_length);
5446 	nfs4_transform_lock_offset(file_lock);
5447 
5448 	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
5449 	if (err) {
5450 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
5451 		goto out_nfserr;
5452 	}
5453 	update_stateid(&stp->st_stid.sc_stateid);
5454 	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
5455 fput:
5456 	fput(filp);
5457 put_stateid:
5458 	nfs4_put_stid(&stp->st_stid);
5459 out:
5460 	nfsd4_bump_seqid(cstate, status);
5461 	if (file_lock)
5462 		locks_free_lock(file_lock);
5463 	return status;
5464 
5465 out_nfserr:
5466 	status = nfserrno(err);
5467 	goto fput;
5468 }
5469 
5470 /*
5471  * returns
5472  * 	true:  locks held by lockowner
5473  * 	false: no locks held by lockowner
5474  */
5475 static bool
5476 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5477 {
5478 	struct file_lock **flpp;
5479 	bool status = false;
5480 	struct file *filp = find_any_file(fp);
5481 	struct inode *inode;
5482 
5483 	if (!filp) {
5484 		/* Any valid lock stateid should have some sort of access */
5485 		WARN_ON_ONCE(1);
5486 		return status;
5487 	}
5488 
5489 	inode = file_inode(filp);
5490 
5491 	spin_lock(&inode->i_lock);
5492 	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
5493 		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
5494 			status = true;
5495 			break;
5496 		}
5497 	}
5498 	spin_unlock(&inode->i_lock);
5499 	fput(filp);
5500 	return status;
5501 }
5502 
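/*
 * RELEASE_LOCKOWNER: if the lockowner named in the request holds no locks on
 * any file, release it along with its lock stateids; otherwise fail with
 * nfserr_locks_held.
 */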
5503 __be32
5504 nfsd4_release_lockowner(struct svc_rqst *rqstp,
5505 			struct nfsd4_compound_state *cstate,
5506 			struct nfsd4_release_lockowner *rlockowner)
5507 {
5508 	clientid_t *clid = &rlockowner->rl_clientid;
5509 	struct nfs4_stateowner *sop;
5510 	struct nfs4_lockowner *lo = NULL;
5511 	struct nfs4_ol_stateid *stp;
5512 	struct xdr_netobj *owner = &rlockowner->rl_owner;
5513 	unsigned int hashval = ownerstr_hashval(owner);
5514 	__be32 status;
5515 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5516 	struct nfs4_client *clp;
5517 
5518 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
5519 		clid->cl_boot, clid->cl_id);
5520 
5521 	status = lookup_clientid(clid, cstate, nn);
5522 	if (status)
5523 		return status;
5524 
5525 	clp = cstate->clp;
5526 	/* Find the matching lock stateowner */
5527 	spin_lock(&clp->cl_lock);
5528 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
5529 			    so_strhash) {
5530 
5531 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
5532 			continue;
5533 
5534 		/* see if there are still any locks associated with it */
5535 		lo = lockowner(sop);
5536 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
5537 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
5538 				status = nfserr_locks_held;
5539 				spin_unlock(&clp->cl_lock);
5540 				return status;
5541 			}
5542 		}
5543 
5544 		atomic_inc(&sop->so_count);
5545 		break;
5546 	}
5547 	spin_unlock(&clp->cl_lock);
5548 	if (lo)
5549 		release_lockowner(lo);
5550 	return status;
5551 }
5552 
5553 static inline struct nfs4_client_reclaim *
5554 alloc_reclaim(void)
5555 {
5556 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
5557 }
5558 
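/*
 * Returns true if a reclaim record for @name exists and has already been
 * claimed by a client (cr_clp set), i.e. the client appears to have
 * re-established its state during the current grace period.
 */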
5559 bool
5560 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5561 {
5562 	struct nfs4_client_reclaim *crp;
5563 
5564 	crp = nfsd4_find_reclaim_client(name, nn);
5565 	return (crp && crp->cr_clp);
5566 }
5567 
5568 /*
5569  * failure => all reclaim bets are off; the client may get nfserr_no_grace.
5570  */
5571 struct nfs4_client_reclaim *
5572 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
5573 {
5574 	unsigned int strhashval;
5575 	struct nfs4_client_reclaim *crp;
5576 
5577 	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
5578 	crp = alloc_reclaim();
5579 	if (crp) {
5580 		strhashval = clientstr_hashval(name);
5581 		INIT_LIST_HEAD(&crp->cr_strhash);
5582 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
5583 		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
5584 		crp->cr_clp = NULL;
5585 		nn->reclaim_str_hashtbl_size++;
5586 	}
5587 	return crp;
5588 }
5589 
5590 void
5591 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
5592 {
5593 	list_del(&crp->cr_strhash);
5594 	kfree(crp);
5595 	nn->reclaim_str_hashtbl_size--;
5596 }
5597 
5598 void
5599 nfs4_release_reclaim(struct nfsd_net *nn)
5600 {
5601 	struct nfs4_client_reclaim *crp = NULL;
5602 	int i;
5603 
5604 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5605 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
5606 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
5607 			                struct nfs4_client_reclaim, cr_strhash);
5608 			nfs4_remove_reclaim_record(crp, nn);
5609 		}
5610 	}
5611 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
5612 }
5613 
5614 /* called from OPEN, CLAIM_PREVIOUS with a new clientid. */
5616 struct nfs4_client_reclaim *
5617 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
5618 {
5619 	unsigned int strhashval;
5620 	struct nfs4_client_reclaim *crp = NULL;
5621 
5622 	dprintk("NFSD: nfsd4_find_reclaim_client for recdir %s\n", recdir);
5623 
5624 	strhashval = clientstr_hashval(recdir);
5625 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
5626 		if (same_name(crp->cr_recdir, recdir)) {
5627 			return crp;
5628 		}
5629 	}
5630 	return NULL;
5631 }
5632 
5633 /*
5634  * Called from OPEN. Look for clientid in reclaim list.
5635  */
5636 __be32
5637 nfs4_check_open_reclaim(clientid_t *clid,
5638 		struct nfsd4_compound_state *cstate,
5639 		struct nfsd_net *nn)
5640 {
5641 	__be32 status;
5642 
5643 	/* find clientid in conf_id_hashtbl */
5644 	status = lookup_clientid(clid, cstate, nn);
5645 	if (status)
5646 		return nfserr_reclaim_bad;
5647 
5648 	if (nfsd4_client_record_check(cstate->clp))
5649 		return nfserr_reclaim_bad;
5650 
5651 	return nfs_ok;
5652 }
5653 
5654 #ifdef CONFIG_NFSD_FAULT_INJECTION
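/*
 * Fault injection helpers: these let a tester count or forcibly discard
 * clients, locks, openowners and delegations (presumably driven through the
 * nfsd fault-injection debugfs files) in order to exercise recovery paths.
 */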
5655 static inline void
5656 put_client(struct nfs4_client *clp)
5657 {
5658 	atomic_dec(&clp->cl_refcount);
5659 }
5660 
5661 static struct nfs4_client *
5662 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
5663 {
5664 	struct nfs4_client *clp;
5665 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5666 					  nfsd_net_id);
5667 
5668 	if (!nfsd_netns_ready(nn))
5669 		return NULL;
5670 
5671 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5672 		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
5673 			return clp;
5674 	}
5675 	return NULL;
5676 }
5677 
5678 u64
5679 nfsd_inject_print_clients(void)
5680 {
5681 	struct nfs4_client *clp;
5682 	u64 count = 0;
5683 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5684 					  nfsd_net_id);
5685 	char buf[INET6_ADDRSTRLEN];
5686 
5687 	if (!nfsd_netns_ready(nn))
5688 		return 0;
5689 
5690 	spin_lock(&nn->client_lock);
5691 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5692 		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5693 		pr_info("NFS Client: %s\n", buf);
5694 		++count;
5695 	}
5696 	spin_unlock(&nn->client_lock);
5697 
5698 	return count;
5699 }
5700 
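/*
 * Expire the client whose address matches @addr, if any.  Returns the number
 * of clients expired (0 or 1).
 */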
5701 u64
5702 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
5703 {
5704 	u64 count = 0;
5705 	struct nfs4_client *clp;
5706 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5707 					  nfsd_net_id);
5708 
5709 	if (!nfsd_netns_ready(nn))
5710 		return count;
5711 
5712 	spin_lock(&nn->client_lock);
5713 	clp = nfsd_find_client(addr, addr_size);
5714 	if (clp) {
5715 		if (mark_client_expired_locked(clp) == nfs_ok)
5716 			++count;
5717 		else
5718 			clp = NULL;
5719 	}
5720 	spin_unlock(&nn->client_lock);
5721 
5722 	if (clp)
5723 		expire_client(clp);
5724 
5725 	return count;
5726 }
5727 
5728 u64
5729 nfsd_inject_forget_clients(u64 max)
5730 {
5731 	u64 count = 0;
5732 	struct nfs4_client *clp, *next;
5733 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5734 						nfsd_net_id);
5735 	LIST_HEAD(reaplist);
5736 
5737 	if (!nfsd_netns_ready(nn))
5738 		return count;
5739 
5740 	spin_lock(&nn->client_lock);
5741 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
5742 		if (mark_client_expired_locked(clp) == nfs_ok) {
5743 			list_add(&clp->cl_lru, &reaplist);
5744 			if (max != 0 && ++count >= max)
5745 				break;
5746 		}
5747 	}
5748 	spin_unlock(&nn->client_lock);
5749 
5750 	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
5751 		expire_client(clp);
5752 
5753 	return count;
5754 }
5755 
5756 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
5757 			     const char *type)
5758 {
5759 	char buf[INET6_ADDRSTRLEN];
5760 	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5761 	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
5762 }
5763 
5764 static void
5765 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
5766 			     struct list_head *collect)
5767 {
5768 	struct nfs4_client *clp = lst->st_stid.sc_client;
5769 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5770 					  nfsd_net_id);
5771 
5772 	if (!collect)
5773 		return;
5774 
5775 	lockdep_assert_held(&nn->client_lock);
5776 	atomic_inc(&clp->cl_refcount);
5777 	list_add(&lst->st_locks, collect);
5778 }
5779 
5780 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
5781 				    struct list_head *collect,
5782 				    void (*func)(struct nfs4_ol_stateid *))
5783 {
5784 	struct nfs4_openowner *oop;
5785 	struct nfs4_ol_stateid *stp, *st_next;
5786 	struct nfs4_ol_stateid *lst, *lst_next;
5787 	u64 count = 0;
5788 
5789 	spin_lock(&clp->cl_lock);
5790 	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
5791 		list_for_each_entry_safe(stp, st_next,
5792 				&oop->oo_owner.so_stateids, st_perstateowner) {
5793 			list_for_each_entry_safe(lst, lst_next,
5794 					&stp->st_locks, st_locks) {
5795 				if (func) {
5796 					func(lst);
5797 					nfsd_inject_add_lock_to_list(lst,
5798 								collect);
5799 				}
5800 				++count;
5801 				/*
5802 				 * Despite the fact that these functions deal
5803 				 * with 64-bit integers for "count", we must
5804 				 * ensure that it doesn't blow up the
5805 				 * clp->cl_refcount. Throw a warning if we
5806 				 * start to approach INT_MAX here.
5807 				 */
5808 				WARN_ON_ONCE(count == (INT_MAX / 2));
5809 				if (count == max)
5810 					goto out;
5811 			}
5812 		}
5813 	}
5814 out:
5815 	spin_unlock(&clp->cl_lock);
5816 
5817 	return count;
5818 }
5819 
5820 static u64
5821 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
5822 			  u64 max)
5823 {
5824 	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
5825 }
5826 
5827 static u64
5828 nfsd_print_client_locks(struct nfs4_client *clp)
5829 {
5830 	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
5831 	nfsd_print_count(clp, count, "locked files");
5832 	return count;
5833 }
5834 
5835 u64
5836 nfsd_inject_print_locks(void)
5837 {
5838 	struct nfs4_client *clp;
5839 	u64 count = 0;
5840 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5841 						nfsd_net_id);
5842 
5843 	if (!nfsd_netns_ready(nn))
5844 		return 0;
5845 
5846 	spin_lock(&nn->client_lock);
5847 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
5848 		count += nfsd_print_client_locks(clp);
5849 	spin_unlock(&nn->client_lock);
5850 
5851 	return count;
5852 }
5853 
5854 static void
5855 nfsd_reap_locks(struct list_head *reaplist)
5856 {
5857 	struct nfs4_client *clp;
5858 	struct nfs4_ol_stateid *stp, *next;
5859 
5860 	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
5861 		list_del_init(&stp->st_locks);
5862 		clp = stp->st_stid.sc_client;
5863 		nfs4_put_stid(&stp->st_stid);
5864 		put_client(clp);
5865 	}
5866 }
5867 
5868 u64
5869 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
5870 {
5871 	unsigned int count = 0;
5872 	struct nfs4_client *clp;
5873 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5874 						nfsd_net_id);
5875 	LIST_HEAD(reaplist);
5876 
5877 	if (!nfsd_netns_ready(nn))
5878 		return count;
5879 
5880 	spin_lock(&nn->client_lock);
5881 	clp = nfsd_find_client(addr, addr_size);
5882 	if (clp)
5883 		count = nfsd_collect_client_locks(clp, &reaplist, 0);
5884 	spin_unlock(&nn->client_lock);
5885 	nfsd_reap_locks(&reaplist);
5886 	return count;
5887 }
5888 
5889 u64
5890 nfsd_inject_forget_locks(u64 max)
5891 {
5892 	u64 count = 0;
5893 	struct nfs4_client *clp;
5894 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5895 						nfsd_net_id);
5896 	LIST_HEAD(reaplist);
5897 
5898 	if (!nfsd_netns_ready(nn))
5899 		return count;
5900 
5901 	spin_lock(&nn->client_lock);
5902 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5903 		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
5904 		if (max != 0 && count >= max)
5905 			break;
5906 	}
5907 	spin_unlock(&nn->client_lock);
5908 	nfsd_reap_locks(&reaplist);
5909 	return count;
5910 }
5911 
5912 static u64
5913 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
5914 			      struct list_head *collect,
5915 			      void (*func)(struct nfs4_openowner *))
5916 {
5917 	struct nfs4_openowner *oop, *next;
5918 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5919 						nfsd_net_id);
5920 	u64 count = 0;
5921 
5922 	lockdep_assert_held(&nn->client_lock);
5923 
5924 	spin_lock(&clp->cl_lock);
5925 	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
5926 		if (func) {
5927 			func(oop);
5928 			if (collect) {
5929 				atomic_inc(&clp->cl_refcount);
5930 				list_add(&oop->oo_perclient, collect);
5931 			}
5932 		}
5933 		++count;
5934 		/*
5935 		 * Despite the fact that these functions deal with
5936 		 * 64-bit integers for "count", we must ensure that
5937 		 * it doesn't blow up the clp->cl_refcount. Throw a
5938 		 * warning if we start to approach INT_MAX here.
5939 		 */
5940 		WARN_ON_ONCE(count == (INT_MAX / 2));
5941 		if (count == max)
5942 			break;
5943 	}
5944 	spin_unlock(&clp->cl_lock);
5945 
5946 	return count;
5947 }
5948 
5949 static u64
5950 nfsd_print_client_openowners(struct nfs4_client *clp)
5951 {
5952 	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
5953 
5954 	nfsd_print_count(clp, count, "openowners");
5955 	return count;
5956 }
5957 
5958 static u64
5959 nfsd_collect_client_openowners(struct nfs4_client *clp,
5960 			       struct list_head *collect, u64 max)
5961 {
5962 	return nfsd_foreach_client_openowner(clp, max, collect,
5963 						unhash_openowner_locked);
5964 }
5965 
5966 u64
5967 nfsd_inject_print_openowners(void)
5968 {
5969 	struct nfs4_client *clp;
5970 	u64 count = 0;
5971 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5972 						nfsd_net_id);
5973 
5974 	if (!nfsd_netns_ready(nn))
5975 		return 0;
5976 
5977 	spin_lock(&nn->client_lock);
5978 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
5979 		count += nfsd_print_client_openowners(clp);
5980 	spin_unlock(&nn->client_lock);
5981 
5982 	return count;
5983 }
5984 
5985 static void
5986 nfsd_reap_openowners(struct list_head *reaplist)
5987 {
5988 	struct nfs4_client *clp;
5989 	struct nfs4_openowner *oop, *next;
5990 
5991 	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
5992 		list_del_init(&oop->oo_perclient);
5993 		clp = oop->oo_owner.so_client;
5994 		release_openowner(oop);
5995 		put_client(clp);
5996 	}
5997 }
5998 
5999 u64
6000 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6001 				     size_t addr_size)
6002 {
6003 	unsigned int count = 0;
6004 	struct nfs4_client *clp;
6005 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6006 						nfsd_net_id);
6007 	LIST_HEAD(reaplist);
6008 
6009 	if (!nfsd_netns_ready(nn))
6010 		return count;
6011 
6012 	spin_lock(&nn->client_lock);
6013 	clp = nfsd_find_client(addr, addr_size);
6014 	if (clp)
6015 		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6016 	spin_unlock(&nn->client_lock);
6017 	nfsd_reap_openowners(&reaplist);
6018 	return count;
6019 }
6020 
6021 u64
6022 nfsd_inject_forget_openowners(u64 max)
6023 {
6024 	u64 count = 0;
6025 	struct nfs4_client *clp;
6026 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6027 						nfsd_net_id);
6028 	LIST_HEAD(reaplist);
6029 
6030 	if (!nfsd_netns_ready(nn))
6031 		return count;
6032 
6033 	spin_lock(&nn->client_lock);
6034 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6035 		count += nfsd_collect_client_openowners(clp, &reaplist,
6036 							max - count);
6037 		if (max != 0 && count >= max)
6038 			break;
6039 	}
6040 	spin_unlock(&nn->client_lock);
6041 	nfsd_reap_openowners(&reaplist);
6042 	return count;
6043 }
6044 
6045 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6046 				     struct list_head *victims)
6047 {
6048 	struct nfs4_delegation *dp, *next;
6049 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6050 						nfsd_net_id);
6051 	u64 count = 0;
6052 
6053 	lockdep_assert_held(&nn->client_lock);
6054 
6055 	spin_lock(&state_lock);
6056 	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6057 		if (victims) {
6058 			/*
6059 			 * It's not safe to mess with delegations that have a
6060 			 * non-zero dl_time. They might have already been broken
6061 			 * and could be processed by the laundromat outside of
6062 			 * the state_lock. Just leave them be.
6063 			 */
6064 			if (dp->dl_time != 0)
6065 				continue;
6066 
6067 			atomic_inc(&clp->cl_refcount);
6068 			unhash_delegation_locked(dp);
6069 			list_add(&dp->dl_recall_lru, victims);
6070 		}
6071 		++count;
6072 		/*
6073 		 * Despite the fact that these functions deal with
6074 		 * 64-bit integers for "count", we must ensure that
6075 		 * it doesn't blow up the clp->cl_refcount. Throw a
6076 		 * warning if we start to approach INT_MAX here.
6077 		 */
6078 		WARN_ON_ONCE(count == (INT_MAX / 2));
6079 		if (count == max)
6080 			break;
6081 	}
6082 	spin_unlock(&state_lock);
6083 	return count;
6084 }
6085 
6086 static u64
6087 nfsd_print_client_delegations(struct nfs4_client *clp)
6088 {
6089 	u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6090 
6091 	nfsd_print_count(clp, count, "delegations");
6092 	return count;
6093 }
6094 
6095 u64
6096 nfsd_inject_print_delegations(void)
6097 {
6098 	struct nfs4_client *clp;
6099 	u64 count = 0;
6100 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6101 						nfsd_net_id);
6102 
6103 	if (!nfsd_netns_ready(nn))
6104 		return 0;
6105 
6106 	spin_lock(&nn->client_lock);
6107 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6108 		count += nfsd_print_client_delegations(clp);
6109 	spin_unlock(&nn->client_lock);
6110 
6111 	return count;
6112 }
6113 
6114 static void
6115 nfsd_forget_delegations(struct list_head *reaplist)
6116 {
6117 	struct nfs4_client *clp;
6118 	struct nfs4_delegation *dp, *next;
6119 
6120 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6121 		list_del_init(&dp->dl_recall_lru);
6122 		clp = dp->dl_stid.sc_client;
6123 		revoke_delegation(dp);
6124 		put_client(clp);
6125 	}
6126 }
6127 
6128 u64
6129 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6130 				      size_t addr_size)
6131 {
6132 	u64 count = 0;
6133 	struct nfs4_client *clp;
6134 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6135 						nfsd_net_id);
6136 	LIST_HEAD(reaplist);
6137 
6138 	if (!nfsd_netns_ready(nn))
6139 		return count;
6140 
6141 	spin_lock(&nn->client_lock);
6142 	clp = nfsd_find_client(addr, addr_size);
6143 	if (clp)
6144 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6145 	spin_unlock(&nn->client_lock);
6146 
6147 	nfsd_forget_delegations(&reaplist);
6148 	return count;
6149 }
6150 
6151 u64
6152 nfsd_inject_forget_delegations(u64 max)
6153 {
6154 	u64 count = 0;
6155 	struct nfs4_client *clp;
6156 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6157 						nfsd_net_id);
6158 	LIST_HEAD(reaplist);
6159 
6160 	if (!nfsd_netns_ready(nn))
6161 		return count;
6162 
6163 	spin_lock(&nn->client_lock);
6164 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6165 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6166 		if (max != 0 && count >= max)
6167 			break;
6168 	}
6169 	spin_unlock(&nn->client_lock);
6170 	nfsd_forget_delegations(&reaplist);
6171 	return count;
6172 }
6173 
6174 static void
6175 nfsd_recall_delegations(struct list_head *reaplist)
6176 {
6177 	struct nfs4_client *clp;
6178 	struct nfs4_delegation *dp, *next;
6179 
6180 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6181 		list_del_init(&dp->dl_recall_lru);
6182 		clp = dp->dl_stid.sc_client;
6183 		/*
6184 		 * We skipped all entries that had a zero dl_time before,
6185 		 * so we can now reset the dl_time back to 0. If a delegation
6186 		 * break comes in now, then it won't make any difference since
6187 		 * we're recalling it either way.
6188 		 */
6189 		spin_lock(&state_lock);
6190 		dp->dl_time = 0;
6191 		spin_unlock(&state_lock);
6192 		nfsd_break_one_deleg(dp);
6193 		put_client(clp);
6194 	}
6195 }
6196 
6197 u64
6198 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
6199 				      size_t addr_size)
6200 {
6201 	u64 count = 0;
6202 	struct nfs4_client *clp;
6203 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6204 						nfsd_net_id);
6205 	LIST_HEAD(reaplist);
6206 
6207 	if (!nfsd_netns_ready(nn))
6208 		return count;
6209 
6210 	spin_lock(&nn->client_lock);
6211 	clp = nfsd_find_client(addr, addr_size);
6212 	if (clp)
6213 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6214 	spin_unlock(&nn->client_lock);
6215 
6216 	nfsd_recall_delegations(&reaplist);
6217 	return count;
6218 }
6219 
6220 u64
6221 nfsd_inject_recall_delegations(u64 max)
6222 {
6223 	u64 count = 0;
6224 	struct nfs4_client *clp, *next;
6225 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6226 						nfsd_net_id);
6227 	LIST_HEAD(reaplist);
6228 
6229 	if (!nfsd_netns_ready(nn))
6230 		return count;
6231 
6232 	spin_lock(&nn->client_lock);
6233 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6234 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6235 		if (max != 0 && ++count >= max)
6236 			break;
6237 	}
6238 	spin_unlock(&nn->client_lock);
6239 	nfsd_recall_delegations(&reaplist);
6240 	return count;
6241 }
6242 #endif /* CONFIG_NFSD_FAULT_INJECTION */
6243 
6244 /*
6245  * Since the lifetime of a delegation isn't limited to that of an open, a
6246  * client may quite reasonably hang on to a delegation as long as it has
6247  * the inode cached.  This becomes an obvious problem the first time a
6248  * client's inode cache approaches the size of the server's total memory.
6249  *
6250  * For now we avoid this problem by imposing a hard limit on the number
6251  * of delegations, which varies according to the server's memory size.
6252  */
6253 static void
6254 set_max_delegations(void)
6255 {
6256 	/*
6257 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
6258 	 * estimates suggest that in the worst case (where every delegation
6259 	 * is for a different inode), a delegation could take about 1.5K,
6260 	 * giving a worst case usage of about 6% of memory.
6261 	 */
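	 *
	 * For example, with 4K pages (PAGE_SHIFT == 12) the shift below is
	 * 20 - 2 - 12 == 6, i.e. one delegation per 64 free pages (4 per MB).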
6262 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
6263 }
6264 
6265 static int nfs4_state_create_net(struct net *net)
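/*
 * Allocate and initialize the per-network-namespace state: client and
 * session hash tables, name trees, LRU lists and the laundromat work item.
 */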
6266 {
6267 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6268 	int i;
6269 
6270 	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6271 			CLIENT_HASH_SIZE, GFP_KERNEL);
6272 	if (!nn->conf_id_hashtbl)
6273 		goto err;
6274 	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6275 			CLIENT_HASH_SIZE, GFP_KERNEL);
6276 	if (!nn->unconf_id_hashtbl)
6277 		goto err_unconf_id;
6278 	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
6279 			SESSION_HASH_SIZE, GFP_KERNEL);
6280 	if (!nn->sessionid_hashtbl)
6281 		goto err_sessionid;
6282 
6283 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6284 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
6285 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
6286 	}
6287 	for (i = 0; i < SESSION_HASH_SIZE; i++)
6288 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
6289 	nn->conf_name_tree = RB_ROOT;
6290 	nn->unconf_name_tree = RB_ROOT;
6291 	INIT_LIST_HEAD(&nn->client_lru);
6292 	INIT_LIST_HEAD(&nn->close_lru);
6293 	INIT_LIST_HEAD(&nn->del_recall_lru);
6294 	spin_lock_init(&nn->client_lock);
6295 
6296 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6297 	get_net(net);
6298 
6299 	return 0;
6300 
6301 err_sessionid:
6302 	kfree(nn->unconf_id_hashtbl);
6303 err_unconf_id:
6304 	kfree(nn->conf_id_hashtbl);
6305 err:
6306 	return -ENOMEM;
6307 }
6308 
6309 static void
6310 nfs4_state_destroy_net(struct net *net)
6311 {
6312 	int i;
6313 	struct nfs4_client *clp = NULL;
6314 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6315 
6316 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6317 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
6318 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6319 			destroy_client(clp);
6320 		}
6321 	}
6322 
6323 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6324 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
6325 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6326 			destroy_client(clp);
6327 		}
6328 	}
6329 
6330 	kfree(nn->sessionid_hashtbl);
6331 	kfree(nn->unconf_id_hashtbl);
6332 	kfree(nn->conf_id_hashtbl);
6333 	put_net(net);
6334 }
6335 
6336 int
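/*
 * Per-net startup: create the state tables, initialize client tracking,
 * start the grace period and schedule the first laundromat run for when the
 * grace period expires.
 */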
6337 nfs4_state_start_net(struct net *net)
6338 {
6339 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6340 	int ret;
6341 
6342 	ret = nfs4_state_create_net(net);
6343 	if (ret)
6344 		return ret;
6345 	nfsd4_client_tracking_init(net);
6346 	nn->boot_time = get_seconds();
6347 	locks_start_grace(net, &nn->nfsd4_manager);
6348 	nn->grace_ended = false;
6349 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
6350 	       nn->nfsd4_grace, net);
6351 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
6352 	return 0;
6353 }
6354 
6355 /* initialization to perform when the nfsd service is started: */
6356 
6357 int
6358 nfs4_state_start(void)
6359 {
6360 	int ret;
6361 
6362 	ret = set_callback_cred();
6363 	if (ret)
6364 		return -ENOMEM;
6365 	laundry_wq = create_singlethread_workqueue("nfsd4");
6366 	if (laundry_wq == NULL) {
6367 		ret = -ENOMEM;
6368 		goto out_recovery;
6369 	}
6370 	ret = nfsd4_create_callback_queue();
6371 	if (ret)
6372 		goto out_free_laundry;
6373 
6374 	set_max_delegations();
6375 
6376 	return 0;
6377 
6378 out_free_laundry:
6379 	destroy_workqueue(laundry_wq);
6380 out_recovery:
6381 	return ret;
6382 }
6383 
6384 void
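/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash and
 * put any remaining delegations, then tear down client tracking and the
 * per-net tables.
 */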
6385 nfs4_state_shutdown_net(struct net *net)
6386 {
6387 	struct nfs4_delegation *dp = NULL;
6388 	struct list_head *pos, *next, reaplist;
6389 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6390 
6391 	cancel_delayed_work_sync(&nn->laundromat_work);
6392 	locks_end_grace(&nn->nfsd4_manager);
6393 
6394 	INIT_LIST_HEAD(&reaplist);
6395 	spin_lock(&state_lock);
6396 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
6397 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6398 		unhash_delegation_locked(dp);
6399 		list_add(&dp->dl_recall_lru, &reaplist);
6400 	}
6401 	spin_unlock(&state_lock);
6402 	list_for_each_safe(pos, next, &reaplist) {
6403 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6404 		list_del_init(&dp->dl_recall_lru);
6405 		nfs4_put_stid(&dp->dl_stid);
6406 	}
6407 
6408 	nfsd4_client_tracking_exit(net);
6409 	nfs4_state_destroy_net(net);
6410 }
6411 
6412 void
6413 nfs4_state_shutdown(void)
6414 {
6415 	destroy_workqueue(laundry_wq);
6416 	nfsd4_destroy_callback_queue();
6417 }
6418 
6419 static void
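/*
 * Helpers for NFSv4.1+ "current stateid" handling: put_stateid() saves the
 * most recently returned stateid in the compound state, and get_stateid()
 * substitutes it when a later op in the compound passes the special
 * "current" stateid value.
 */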
6420 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6421 {
6422 	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
6423 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
6424 }
6425 
6426 static void
6427 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6428 {
6429 	if (cstate->minorversion) {
6430 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
6431 		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6432 	}
6433 }
6434 
6435 void
6436 clear_current_stateid(struct nfsd4_compound_state *cstate)
6437 {
6438 	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6439 }
6440 
6441 /*
6442  * functions to set current state id
6443  */
6444 void
6445 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6446 {
6447 	put_stateid(cstate, &odp->od_stateid);
6448 }
6449 
6450 void
6451 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
6452 {
6453 	put_stateid(cstate, &open->op_stateid);
6454 }
6455 
6456 void
6457 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6458 {
6459 	put_stateid(cstate, &close->cl_stateid);
6460 }
6461 
6462 void
6463 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
6464 {
6465 	put_stateid(cstate, &lock->lk_resp_stateid);
6466 }
6467 
6468 /*
6469  * functions to consume current state id
6470  */
6471 
6472 void
6473 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6474 {
6475 	get_stateid(cstate, &odp->od_stateid);
6476 }
6477 
6478 void
6479 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
6480 {
6481 	get_stateid(cstate, &drp->dr_stateid);
6482 }
6483 
6484 void
6485 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
6486 {
6487 	get_stateid(cstate, &fsp->fr_stateid);
6488 }
6489 
6490 void
6491 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
6492 {
6493 	get_stateid(cstate, &setattr->sa_stateid);
6494 }
6495 
6496 void
6497 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6498 {
6499 	get_stateid(cstate, &close->cl_stateid);
6500 }
6501 
6502 void
6503 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
6504 {
6505 	get_stateid(cstate, &locku->lu_stateid);
6506 }
6507 
6508 void
6509 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
6510 {
6511 	get_stateid(cstate, &read->rd_stateid);
6512 }
6513 
6514 void
6515 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
6516 {
6517 	get_stateid(cstate, &write->wr_stateid);
6518 }
6519