xref: /openbmc/linux/fs/nfsd/nfs4state.c (revision 5f66f73b)
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include <linux/fsnotify.h>
47 #include "xdr4.h"
48 #include "xdr4cb.h"
49 #include "vfs.h"
50 #include "current_stateid.h"
51 
52 #include "netns.h"
53 #include "pnfs.h"
54 #include "filecache.h"
55 #include "trace.h"
56 
57 #define NFSDDBG_FACILITY                NFSDDBG_PROC
58 
59 #define all_ones {{~0,~0},~0}
60 static const stateid_t one_stateid = {
61 	.si_generation = ~0,
62 	.si_opaque = all_ones,
63 };
64 static const stateid_t zero_stateid = {
65 	/* all fields zero */
66 };
67 static const stateid_t currentstateid = {
68 	.si_generation = 1,
69 };
70 static const stateid_t close_stateid = {
71 	.si_generation = 0xffffffffU,
72 };
73 
74 static u64 current_sessionid = 1;
75 
76 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
77 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
78 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
79 #define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
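/*
 * Note (hedged summary, not normative): zero_stateid and one_stateid
 * are the protocol's special "anonymous" and "READ bypass" stateids;
 * currentstateid is the NFSv4.1 "current stateid" sentinel, and
 * close_stateid is the special value a v4.1+ CLOSE returns once the
 * stateid no longer exists.  The macros above match on the full
 * 16-byte stateid_t value, so only exact matches qualify.
 */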
80 
81 /* forward declarations */
82 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
83 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
84 void nfsd4_end_grace(struct nfsd_net *nn);
85 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
86 
87 /* Locking: */
88 
89 /*
90  * Currently used for the del_recall_lru and file hash table.  In an
91  * effort to decrease the scope of the client_mutex, this spinlock may
92  * eventually cover more:
93  */
94 static DEFINE_SPINLOCK(state_lock);
95 
96 enum nfsd4_st_mutex_lock_subclass {
97 	OPEN_STATEID_MUTEX = 0,
98 	LOCK_STATEID_MUTEX = 1,
99 };
100 
101 /*
102  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
103  * the refcount on the open stateid to drop.
104  */
105 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
106 
107 /*
108  * A waitqueue where a writer to clients/#/ctl destroying a client can
109  * wait for cl_rpc_users to drop to 0 and then for the client to be
110  * unhashed.
111  */
112 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
113 
114 static struct kmem_cache *client_slab;
115 static struct kmem_cache *openowner_slab;
116 static struct kmem_cache *lockowner_slab;
117 static struct kmem_cache *file_slab;
118 static struct kmem_cache *stateid_slab;
119 static struct kmem_cache *deleg_slab;
120 static struct kmem_cache *odstate_slab;
121 
122 static void free_session(struct nfsd4_session *);
123 
124 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
125 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
126 
127 static bool is_session_dead(struct nfsd4_session *ses)
128 {
129 	return ses->se_flags & NFS4_SESSION_DEAD;
130 }
131 
132 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
133 {
134 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
135 		return nfserr_jukebox;
136 	ses->se_flags |= NFS4_SESSION_DEAD;
137 	return nfs_ok;
138 }
139 
140 static bool is_client_expired(struct nfs4_client *clp)
141 {
142 	return clp->cl_time == 0;
143 }
144 
145 static __be32 get_client_locked(struct nfs4_client *clp)
146 {
147 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
148 
149 	lockdep_assert_held(&nn->client_lock);
150 
151 	if (is_client_expired(clp))
152 		return nfserr_expired;
153 	atomic_inc(&clp->cl_rpc_users);
154 	return nfs_ok;
155 }
156 
157 /* must be called under the client_lock */
158 static inline void
159 renew_client_locked(struct nfs4_client *clp)
160 {
161 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
162 
163 	if (is_client_expired(clp)) {
164 		WARN_ON(1);
165 		printk("%s: client (clientid %08x/%08x) already expired\n",
166 			__func__,
167 			clp->cl_clientid.cl_boot,
168 			clp->cl_clientid.cl_id);
169 		return;
170 	}
171 
172 	list_move_tail(&clp->cl_lru, &nn->client_lru);
173 	clp->cl_time = ktime_get_boottime_seconds();
174 }
175 
176 static void put_client_renew_locked(struct nfs4_client *clp)
177 {
178 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
179 
180 	lockdep_assert_held(&nn->client_lock);
181 
182 	if (!atomic_dec_and_test(&clp->cl_rpc_users))
183 		return;
184 	if (!is_client_expired(clp))
185 		renew_client_locked(clp);
186 	else
187 		wake_up_all(&expiry_wq);
188 }
189 
190 static void put_client_renew(struct nfs4_client *clp)
191 {
192 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
193 
194 	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
195 		return;
196 	if (!is_client_expired(clp))
197 		renew_client_locked(clp);
198 	else
199 		wake_up_all(&expiry_wq);
200 	spin_unlock(&nn->client_lock);
201 }
202 
203 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
204 {
205 	__be32 status;
206 
207 	if (is_session_dead(ses))
208 		return nfserr_badsession;
209 	status = get_client_locked(ses->se_client);
210 	if (status)
211 		return status;
212 	atomic_inc(&ses->se_ref);
213 	return nfs_ok;
214 }
215 
216 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
217 {
218 	struct nfs4_client *clp = ses->se_client;
219 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
220 
221 	lockdep_assert_held(&nn->client_lock);
222 
223 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
224 		free_session(ses);
225 	put_client_renew_locked(clp);
226 }
227 
228 static void nfsd4_put_session(struct nfsd4_session *ses)
229 {
230 	struct nfs4_client *clp = ses->se_client;
231 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
232 
233 	spin_lock(&nn->client_lock);
234 	nfsd4_put_session_locked(ses);
235 	spin_unlock(&nn->client_lock);
236 }
237 
238 static struct nfsd4_blocked_lock *
239 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
240 			struct nfsd_net *nn)
241 {
242 	struct nfsd4_blocked_lock *cur, *found = NULL;
243 
244 	spin_lock(&nn->blocked_locks_lock);
245 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
246 		if (fh_match(fh, &cur->nbl_fh)) {
247 			list_del_init(&cur->nbl_list);
248 			list_del_init(&cur->nbl_lru);
249 			found = cur;
250 			break;
251 		}
252 	}
253 	spin_unlock(&nn->blocked_locks_lock);
254 	if (found)
255 		locks_delete_block(&found->nbl_lock);
256 	return found;
257 }
258 
259 static struct nfsd4_blocked_lock *
260 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
261 			struct nfsd_net *nn)
262 {
263 	struct nfsd4_blocked_lock *nbl;
264 
265 	nbl = find_blocked_lock(lo, fh, nn);
266 	if (!nbl) {
267 		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
268 		if (nbl) {
269 			INIT_LIST_HEAD(&nbl->nbl_list);
270 			INIT_LIST_HEAD(&nbl->nbl_lru);
271 			fh_copy_shallow(&nbl->nbl_fh, fh);
272 			locks_init_lock(&nbl->nbl_lock);
273 			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
274 					&nfsd4_cb_notify_lock_ops,
275 					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
276 		}
277 	}
278 	return nbl;
279 }
280 
281 static void
282 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
283 {
284 	locks_delete_block(&nbl->nbl_lock);
285 	locks_release_private(&nbl->nbl_lock);
286 	kfree(nbl);
287 }
288 
289 static void
290 remove_blocked_locks(struct nfs4_lockowner *lo)
291 {
292 	struct nfs4_client *clp = lo->lo_owner.so_client;
293 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
294 	struct nfsd4_blocked_lock *nbl;
295 	LIST_HEAD(reaplist);
296 
297 	/* Dequeue all blocked locks */
298 	spin_lock(&nn->blocked_locks_lock);
299 	while (!list_empty(&lo->lo_blocked)) {
300 		nbl = list_first_entry(&lo->lo_blocked,
301 					struct nfsd4_blocked_lock,
302 					nbl_list);
303 		list_del_init(&nbl->nbl_list);
304 		list_move(&nbl->nbl_lru, &reaplist);
305 	}
306 	spin_unlock(&nn->blocked_locks_lock);
307 
308 	/* Now free them */
309 	while (!list_empty(&reaplist)) {
310 		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
311 					nbl_lru);
312 		list_del_init(&nbl->nbl_lru);
313 		free_blocked_lock(nbl);
314 	}
315 }
316 
317 static void
318 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
319 {
320 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
321 						struct nfsd4_blocked_lock, nbl_cb);
322 	locks_delete_block(&nbl->nbl_lock);
323 }
324 
325 static int
326 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
327 {
328 	/*
329 	 * Since this is just an optimization, we don't try very hard if it
330 	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
331 	 * just quit trying on anything else.
332 	 */
333 	switch (task->tk_status) {
334 	case -NFS4ERR_DELAY:
335 		rpc_delay(task, 1 * HZ);
336 		return 0;
337 	default:
338 		return 1;
339 	}
340 }
341 
342 static void
343 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
344 {
345 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
346 						struct nfsd4_blocked_lock, nbl_cb);
347 
348 	free_blocked_lock(nbl);
349 }
350 
351 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
352 	.prepare	= nfsd4_cb_notify_lock_prepare,
353 	.done		= nfsd4_cb_notify_lock_done,
354 	.release	= nfsd4_cb_notify_lock_release,
355 };
356 
357 static inline struct nfs4_stateowner *
358 nfs4_get_stateowner(struct nfs4_stateowner *sop)
359 {
360 	atomic_inc(&sop->so_count);
361 	return sop;
362 }
363 
364 static int
365 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
366 {
367 	return (sop->so_owner.len == owner->len) &&
368 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
369 }
370 
371 static struct nfs4_openowner *
372 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
373 			struct nfs4_client *clp)
374 {
375 	struct nfs4_stateowner *so;
376 
377 	lockdep_assert_held(&clp->cl_lock);
378 
379 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
380 			    so_strhash) {
381 		if (!so->so_is_open_owner)
382 			continue;
383 		if (same_owner_str(so, &open->op_owner))
384 			return openowner(nfs4_get_stateowner(so));
385 	}
386 	return NULL;
387 }
388 
389 static struct nfs4_openowner *
390 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
391 			struct nfs4_client *clp)
392 {
393 	struct nfs4_openowner *oo;
394 
395 	spin_lock(&clp->cl_lock);
396 	oo = find_openstateowner_str_locked(hashval, open, clp);
397 	spin_unlock(&clp->cl_lock);
398 	return oo;
399 }
400 
401 static inline u32
402 opaque_hashval(const void *ptr, int nbytes)
403 {
404 	unsigned char *cptr = (unsigned char *) ptr;
405 	u32 x = 0;
406 
407 	while (nbytes--) {
408 		x *= 37;
409 		x += *cptr++;
410 	}
411 	return x;
412 }
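/*
 * Illustrative example (not part of the original source): for a
 * two-byte owner "ab" the rolling hash above computes
 *
 *	opaque_hashval("ab", 2) == 'a' * 37 + 'b' == 3687
 *
 * Callers mask the result down to their table size (e.g.
 * OWNER_HASH_MASK below).
 */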
413 
414 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
415 {
416 	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
417 
418 	kmem_cache_free(file_slab, fp);
419 }
420 
421 void
422 put_nfs4_file(struct nfs4_file *fi)
423 {
424 	might_lock(&state_lock);
425 
426 	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
427 		hlist_del_rcu(&fi->fi_hash);
428 		spin_unlock(&state_lock);
429 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
430 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
431 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
432 	}
433 }
434 
435 static struct nfsd_file *
436 __nfs4_get_fd(struct nfs4_file *f, int oflag)
437 {
438 	if (f->fi_fds[oflag])
439 		return nfsd_file_get(f->fi_fds[oflag]);
440 	return NULL;
441 }
442 
443 static struct nfsd_file *
444 find_writeable_file_locked(struct nfs4_file *f)
445 {
446 	struct nfsd_file *ret;
447 
448 	lockdep_assert_held(&f->fi_lock);
449 
450 	ret = __nfs4_get_fd(f, O_WRONLY);
451 	if (!ret)
452 		ret = __nfs4_get_fd(f, O_RDWR);
453 	return ret;
454 }
455 
456 static struct nfsd_file *
457 find_writeable_file(struct nfs4_file *f)
458 {
459 	struct nfsd_file *ret;
460 
461 	spin_lock(&f->fi_lock);
462 	ret = find_writeable_file_locked(f);
463 	spin_unlock(&f->fi_lock);
464 
465 	return ret;
466 }
467 
468 static struct nfsd_file *
469 find_readable_file_locked(struct nfs4_file *f)
470 {
471 	struct nfsd_file *ret;
472 
473 	lockdep_assert_held(&f->fi_lock);
474 
475 	ret = __nfs4_get_fd(f, O_RDONLY);
476 	if (!ret)
477 		ret = __nfs4_get_fd(f, O_RDWR);
478 	return ret;
479 }
480 
481 static struct nfsd_file *
482 find_readable_file(struct nfs4_file *f)
483 {
484 	struct nfsd_file *ret;
485 
486 	spin_lock(&f->fi_lock);
487 	ret = find_readable_file_locked(f);
488 	spin_unlock(&f->fi_lock);
489 
490 	return ret;
491 }
492 
493 struct nfsd_file *
494 find_any_file(struct nfs4_file *f)
495 {
496 	struct nfsd_file *ret;
497 
498 	if (!f)
499 		return NULL;
500 	spin_lock(&f->fi_lock);
501 	ret = __nfs4_get_fd(f, O_RDWR);
502 	if (!ret) {
503 		ret = __nfs4_get_fd(f, O_WRONLY);
504 		if (!ret)
505 			ret = __nfs4_get_fd(f, O_RDONLY);
506 	}
507 	spin_unlock(&f->fi_lock);
508 	return ret;
509 }
510 
511 static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
512 {
513 	struct nfsd_file *ret = NULL;
514 
515 	spin_lock(&f->fi_lock);
516 	if (f->fi_deleg_file)
517 		ret = nfsd_file_get(f->fi_deleg_file);
518 	spin_unlock(&f->fi_lock);
519 	return ret;
520 }
521 
522 static atomic_long_t num_delegations;
523 unsigned long max_delegations;
524 
525 /*
526  * Open owner state (share locks)
527  */
528 
529 /* hash tables for lock and open owners */
530 #define OWNER_HASH_BITS              8
531 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
532 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
533 
534 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
535 {
536 	unsigned int ret;
537 
538 	ret = opaque_hashval(ownername->data, ownername->len);
539 	return ret & OWNER_HASH_MASK;
540 }
541 
542 /* hash table for nfs4_file */
543 #define FILE_HASH_BITS                   8
544 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
545 
546 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
547 {
548 	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
549 }
550 
551 static unsigned int file_hashval(struct knfsd_fh *fh)
552 {
553 	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
554 }
555 
556 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
557 
558 static void
559 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
560 {
561 	lockdep_assert_held(&fp->fi_lock);
562 
563 	if (access & NFS4_SHARE_ACCESS_WRITE)
564 		atomic_inc(&fp->fi_access[O_WRONLY]);
565 	if (access & NFS4_SHARE_ACCESS_READ)
566 		atomic_inc(&fp->fi_access[O_RDONLY]);
567 }
568 
569 static __be32
570 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
571 {
572 	lockdep_assert_held(&fp->fi_lock);
573 
574 	/* Does this access mode make sense? */
575 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
576 		return nfserr_inval;
577 
578 	/* Does it conflict with a deny mode already set? */
579 	if ((access & fp->fi_share_deny) != 0)
580 		return nfserr_share_denied;
581 
582 	__nfs4_file_get_access(fp, access);
583 	return nfs_ok;
584 }
585 
586 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
587 {
588 	/* Common case is that there is no deny mode. */
589 	if (deny) {
590 		/* Does this deny mode make sense? */
591 		if (deny & ~NFS4_SHARE_DENY_BOTH)
592 			return nfserr_inval;
593 
594 		if ((deny & NFS4_SHARE_DENY_READ) &&
595 		    atomic_read(&fp->fi_access[O_RDONLY]))
596 			return nfserr_share_denied;
597 
598 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
599 		    atomic_read(&fp->fi_access[O_WRONLY]))
600 			return nfserr_share_denied;
601 	}
602 	return nfs_ok;
603 }
604 
605 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
606 {
607 	might_lock(&fp->fi_lock);
608 
609 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
610 		struct nfsd_file *f1 = NULL;
611 		struct nfsd_file *f2 = NULL;
612 
613 		swap(f1, fp->fi_fds[oflag]);
614 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
615 			swap(f2, fp->fi_fds[O_RDWR]);
616 		spin_unlock(&fp->fi_lock);
617 		if (f1)
618 			nfsd_file_put(f1);
619 		if (f2)
620 			nfsd_file_put(f2);
621 	}
622 }
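/*
 * Note: the swap() calls above detach the nfsd_file pointers while
 * fi_lock is held; the final nfsd_file_put() calls, which may do
 * significant work, then happen only after the lock is dropped.
 */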
623 
624 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
625 {
626 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
627 
628 	if (access & NFS4_SHARE_ACCESS_WRITE)
629 		__nfs4_file_put_access(fp, O_WRONLY);
630 	if (access & NFS4_SHARE_ACCESS_READ)
631 		__nfs4_file_put_access(fp, O_RDONLY);
632 }
633 
634 /*
635  * Allocate a new open/delegation state counter. This is needed for
636  * pNFS for proper return on close semantics.
637  *
638  * Note that we only allocate it for pNFS-enabled exports, otherwise
639  * all pointers to struct nfs4_clnt_odstate are always NULL.
640  */
641 static struct nfs4_clnt_odstate *
642 alloc_clnt_odstate(struct nfs4_client *clp)
643 {
644 	struct nfs4_clnt_odstate *co;
645 
646 	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
647 	if (co) {
648 		co->co_client = clp;
649 		refcount_set(&co->co_odcount, 1);
650 	}
651 	return co;
652 }
653 
654 static void
655 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
656 {
657 	struct nfs4_file *fp = co->co_file;
658 
659 	lockdep_assert_held(&fp->fi_lock);
660 	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
661 }
662 
663 static inline void
664 get_clnt_odstate(struct nfs4_clnt_odstate *co)
665 {
666 	if (co)
667 		refcount_inc(&co->co_odcount);
668 }
669 
670 static void
671 put_clnt_odstate(struct nfs4_clnt_odstate *co)
672 {
673 	struct nfs4_file *fp;
674 
675 	if (!co)
676 		return;
677 
678 	fp = co->co_file;
679 	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
680 		list_del(&co->co_perfile);
681 		spin_unlock(&fp->fi_lock);
682 
683 		nfsd4_return_all_file_layouts(co->co_client, fp);
684 		kmem_cache_free(odstate_slab, co);
685 	}
686 }
687 
688 static struct nfs4_clnt_odstate *
689 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
690 {
691 	struct nfs4_clnt_odstate *co;
692 	struct nfs4_client *cl;
693 
694 	if (!new)
695 		return NULL;
696 
697 	cl = new->co_client;
698 
699 	spin_lock(&fp->fi_lock);
700 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
701 		if (co->co_client == cl) {
702 			get_clnt_odstate(co);
703 			goto out;
704 		}
705 	}
706 	co = new;
707 	co->co_file = fp;
708 	hash_clnt_odstate_locked(new);
709 out:
710 	spin_unlock(&fp->fi_lock);
711 	return co;
712 }
713 
714 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
715 				  void (*sc_free)(struct nfs4_stid *))
716 {
717 	struct nfs4_stid *stid;
718 	int new_id;
719 
720 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
721 	if (!stid)
722 		return NULL;
723 
724 	idr_preload(GFP_KERNEL);
725 	spin_lock(&cl->cl_lock);
726 	/* Reserving 0 for start of file in nfsdfs "states" file: */
727 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
728 	spin_unlock(&cl->cl_lock);
729 	idr_preload_end();
730 	if (new_id < 0)
731 		goto out_free;
732 
733 	stid->sc_free = sc_free;
734 	stid->sc_client = cl;
735 	stid->sc_stateid.si_opaque.so_id = new_id;
736 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
737 	/* Will be incremented before return to client: */
738 	refcount_set(&stid->sc_count, 1);
739 	spin_lock_init(&stid->sc_lock);
740 	INIT_LIST_HEAD(&stid->sc_cp_list);
741 
742 	/*
743 	 * It shouldn't be a problem to reuse an opaque stateid value.
744 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
745 	 * example, a stray write retransmission could be accepted by
746 	 * the server when it should have been rejected.  Therefore,
747 	 * adopt a trick from the sctp code to attempt to maximize the
748 	 * amount of time until an id is reused, by ensuring they always
749 	 * "increase" (mod INT_MAX):
750 	 */
751 	return stid;
752 out_free:
753 	kmem_cache_free(slab, stid);
754 	return NULL;
755 }
756 
757 /*
758  * Create a unique stateid_t to represent each COPY.
759  */
760 static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
761 			      unsigned char sc_type)
762 {
763 	int new_id;
764 
765 	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
766 	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
767 	stid->sc_type = sc_type;
768 
769 	idr_preload(GFP_KERNEL);
770 	spin_lock(&nn->s2s_cp_lock);
771 	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
772 	stid->stid.si_opaque.so_id = new_id;
773 	stid->stid.si_generation = 1;
774 	spin_unlock(&nn->s2s_cp_lock);
775 	idr_preload_end();
776 	if (new_id < 0)
777 		return 0;
778 	return 1;
779 }
780 
781 int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
782 {
783 	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
784 }
785 
786 struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
787 						     struct nfs4_stid *p_stid)
788 {
789 	struct nfs4_cpntf_state *cps;
790 
791 	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
792 	if (!cps)
793 		return NULL;
794 	cps->cpntf_time = ktime_get_boottime_seconds();
795 	refcount_set(&cps->cp_stateid.sc_count, 1);
796 	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
797 		goto out_free;
798 	spin_lock(&nn->s2s_cp_lock);
799 	list_add(&cps->cp_list, &p_stid->sc_cp_list);
800 	spin_unlock(&nn->s2s_cp_lock);
801 	return cps;
802 out_free:
803 	kfree(cps);
804 	return NULL;
805 }
806 
807 void nfs4_free_copy_state(struct nfsd4_copy *copy)
808 {
809 	struct nfsd_net *nn;
810 
811 	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
812 	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
813 	spin_lock(&nn->s2s_cp_lock);
814 	idr_remove(&nn->s2s_cp_stateids,
815 		   copy->cp_stateid.stid.si_opaque.so_id);
816 	spin_unlock(&nn->s2s_cp_lock);
817 }
818 
819 static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
820 {
821 	struct nfs4_cpntf_state *cps;
822 	struct nfsd_net *nn;
823 
824 	nn = net_generic(net, nfsd_net_id);
825 	spin_lock(&nn->s2s_cp_lock);
826 	while (!list_empty(&stid->sc_cp_list)) {
827 		cps = list_first_entry(&stid->sc_cp_list,
828 				       struct nfs4_cpntf_state, cp_list);
829 		_free_cpntf_state_locked(nn, cps);
830 	}
831 	spin_unlock(&nn->s2s_cp_lock);
832 }
833 
834 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
835 {
836 	struct nfs4_stid *stid;
837 
838 	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
839 	if (!stid)
840 		return NULL;
841 
842 	return openlockstateid(stid);
843 }
844 
845 static void nfs4_free_deleg(struct nfs4_stid *stid)
846 {
847 	kmem_cache_free(deleg_slab, stid);
848 	atomic_long_dec(&num_delegations);
849 }
850 
851 /*
852  * When we recall a delegation, we should be careful not to hand it
853  * out again straight away.
854  * To ensure this we keep a pair of bloom filters ('new' and 'old')
855  * in which the filehandles of recalled delegations are "stored".
856  * If a filehandle appears in either filter, a delegation is blocked.
857  * When a delegation is recalled, the filehandle is stored in the "new"
858  * filter.
859  * Every 30 seconds we swap the filters and clear the "new" one,
860  * unless both are empty of course.
861  *
862  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
863  * low 3 bytes as hash-table indices.
864  *
865  * 'blocked_delegations_lock', which is always taken in block_delegations(),
866  * is used to manage concurrent access.  Testing does not need the lock
867  * except when swapping the two filters.
868  */
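/*
 * Illustrative example: a filehandle whose 32-bit hash is 0x00345612
 * sets (or tests) bits 0x12, 0x56 and 0x34 of a 256-bit filter; a
 * delegation is blocked only if all three bits are set in one of the
 * two filters.
 */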
869 static DEFINE_SPINLOCK(blocked_delegations_lock);
870 static struct bloom_pair {
871 	int	entries, old_entries;
872 	time64_t swap_time;
873 	int	new; /* index into 'set' */
874 	DECLARE_BITMAP(set[2], 256);
875 } blocked_delegations;
876 
877 static int delegation_blocked(struct knfsd_fh *fh)
878 {
879 	u32 hash;
880 	struct bloom_pair *bd = &blocked_delegations;
881 
882 	if (bd->entries == 0)
883 		return 0;
884 	if (ktime_get_seconds() - bd->swap_time > 30) {
885 		spin_lock(&blocked_delegations_lock);
886 		if (ktime_get_seconds() - bd->swap_time > 30) {
887 			bd->entries -= bd->old_entries;
888 			bd->old_entries = bd->entries;
889 			memset(bd->set[bd->new], 0,
890 			       sizeof(bd->set[0]));
891 			bd->new = 1 - bd->new;
892 			bd->swap_time = ktime_get_seconds();
893 		}
894 		spin_unlock(&blocked_delegations_lock);
895 	}
896 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
897 	if (test_bit(hash&255, bd->set[0]) &&
898 	    test_bit((hash>>8)&255, bd->set[0]) &&
899 	    test_bit((hash>>16)&255, bd->set[0]))
900 		return 1;
901 
902 	if (test_bit(hash&255, bd->set[1]) &&
903 	    test_bit((hash>>8)&255, bd->set[1]) &&
904 	    test_bit((hash>>16)&255, bd->set[1]))
905 		return 1;
906 
907 	return 0;
908 }
909 
910 static void block_delegations(struct knfsd_fh *fh)
911 {
912 	u32 hash;
913 	struct bloom_pair *bd = &blocked_delegations;
914 
915 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
916 
917 	spin_lock(&blocked_delegations_lock);
918 	__set_bit(hash&255, bd->set[bd->new]);
919 	__set_bit((hash>>8)&255, bd->set[bd->new]);
920 	__set_bit((hash>>16)&255, bd->set[bd->new]);
921 	if (bd->entries == 0)
922 		bd->swap_time = ktime_get_seconds();
923 	bd->entries += 1;
924 	spin_unlock(&blocked_delegations_lock);
925 }
926 
927 static struct nfs4_delegation *
928 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
929 		 struct svc_fh *current_fh,
930 		 struct nfs4_clnt_odstate *odstate)
931 {
932 	struct nfs4_delegation *dp;
933 	long n;
934 
935 	dprintk("NFSD alloc_init_deleg\n");
936 	n = atomic_long_inc_return(&num_delegations);
937 	if (n < 0 || n > max_delegations)
938 		goto out_dec;
939 	if (delegation_blocked(&current_fh->fh_handle))
940 		goto out_dec;
941 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
942 	if (dp == NULL)
943 		goto out_dec;
944 
945 	/*
946 	 * delegation seqid's are never incremented.  The 4.1 special
947 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
948 	 * 0 anyway just for consistency and use 1:
949 	 */
950 	dp->dl_stid.sc_stateid.si_generation = 1;
951 	INIT_LIST_HEAD(&dp->dl_perfile);
952 	INIT_LIST_HEAD(&dp->dl_perclnt);
953 	INIT_LIST_HEAD(&dp->dl_recall_lru);
954 	dp->dl_clnt_odstate = odstate;
955 	get_clnt_odstate(odstate);
956 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
957 	dp->dl_retries = 1;
958 	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
959 		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
960 	get_nfs4_file(fp);
961 	dp->dl_stid.sc_file = fp;
962 	return dp;
963 out_dec:
964 	atomic_long_dec(&num_delegations);
965 	return NULL;
966 }
967 
968 void
969 nfs4_put_stid(struct nfs4_stid *s)
970 {
971 	struct nfs4_file *fp = s->sc_file;
972 	struct nfs4_client *clp = s->sc_client;
973 
974 	might_lock(&clp->cl_lock);
975 
976 	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
977 		wake_up_all(&close_wq);
978 		return;
979 	}
980 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
981 	nfs4_free_cpntf_statelist(clp->net, s);
982 	spin_unlock(&clp->cl_lock);
983 	s->sc_free(s);
984 	if (fp)
985 		put_nfs4_file(fp);
986 }
987 
988 void
989 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
990 {
991 	stateid_t *src = &stid->sc_stateid;
992 
993 	spin_lock(&stid->sc_lock);
994 	if (unlikely(++src->si_generation == 0))
995 		src->si_generation = 1;
996 	memcpy(dst, src, sizeof(*dst));
997 	spin_unlock(&stid->sc_lock);
998 }
999 
1000 static void put_deleg_file(struct nfs4_file *fp)
1001 {
1002 	struct nfsd_file *nf = NULL;
1003 
1004 	spin_lock(&fp->fi_lock);
1005 	if (--fp->fi_delegees == 0)
1006 		swap(nf, fp->fi_deleg_file);
1007 	spin_unlock(&fp->fi_lock);
1008 
1009 	if (nf)
1010 		nfsd_file_put(nf);
1011 }
1012 
1013 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1014 {
1015 	struct nfs4_file *fp = dp->dl_stid.sc_file;
1016 	struct nfsd_file *nf = fp->fi_deleg_file;
1017 
1018 	WARN_ON_ONCE(!fp->fi_delegees);
1019 
1020 	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1021 	put_deleg_file(fp);
1022 }
1023 
1024 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1025 {
1026 	put_clnt_odstate(dp->dl_clnt_odstate);
1027 	nfs4_unlock_deleg_lease(dp);
1028 	nfs4_put_stid(&dp->dl_stid);
1029 }
1030 
1031 void nfs4_unhash_stid(struct nfs4_stid *s)
1032 {
1033 	s->sc_type = 0;
1034 }
1035 
1036 /**
1037  * nfs4_delegation_exists - Discover if this delegation already exists
1038  * @clp:     a pointer to the nfs4_client we're granting a delegation to
1039  * @fp:      a pointer to the nfs4_file we're granting a delegation on
1040  *
1041  * Return:
1042  *      true iff an existing delegation is found, otherwise false
1043  */
1044 
1045 static bool
1046 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1047 {
1048 	struct nfs4_delegation *searchdp = NULL;
1049 	struct nfs4_client *searchclp = NULL;
1050 
1051 	lockdep_assert_held(&state_lock);
1052 	lockdep_assert_held(&fp->fi_lock);
1053 
1054 	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1055 		searchclp = searchdp->dl_stid.sc_client;
1056 		if (clp == searchclp) {
1057 			return true;
1058 		}
1059 	}
1060 	return false;
1061 }
1062 
1063 /**
1064  * hash_delegation_locked - Add a delegation to the appropriate lists
1065  * @dp:     a pointer to the nfs4_delegation we are adding.
1066  * @fp:     a pointer to the nfs4_file we're granting a delegation on
1067  *
1068  * Return:
1069  *      On success: 0 if the delegation was successfully hashed.
1070  *
1071  *      On error: -EAGAIN if one was previously granted to this
1072  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
1073  *
1074  */
1075 
1076 static int
1077 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1078 {
1079 	struct nfs4_client *clp = dp->dl_stid.sc_client;
1080 
1081 	lockdep_assert_held(&state_lock);
1082 	lockdep_assert_held(&fp->fi_lock);
1083 
1084 	if (nfs4_delegation_exists(clp, fp))
1085 		return -EAGAIN;
1086 	refcount_inc(&dp->dl_stid.sc_count);
1087 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
1088 	list_add(&dp->dl_perfile, &fp->fi_delegations);
1089 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
1090 	return 0;
1091 }
1092 
1093 static bool
1094 unhash_delegation_locked(struct nfs4_delegation *dp)
1095 {
1096 	struct nfs4_file *fp = dp->dl_stid.sc_file;
1097 
1098 	lockdep_assert_held(&state_lock);
1099 
1100 	if (list_empty(&dp->dl_perfile))
1101 		return false;
1102 
1103 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1104 	/* Ensure that deleg break won't try to requeue it */
1105 	++dp->dl_time;
1106 	spin_lock(&fp->fi_lock);
1107 	list_del_init(&dp->dl_perclnt);
1108 	list_del_init(&dp->dl_recall_lru);
1109 	list_del_init(&dp->dl_perfile);
1110 	spin_unlock(&fp->fi_lock);
1111 	return true;
1112 }
1113 
1114 static void destroy_delegation(struct nfs4_delegation *dp)
1115 {
1116 	bool unhashed;
1117 
1118 	spin_lock(&state_lock);
1119 	unhashed = unhash_delegation_locked(dp);
1120 	spin_unlock(&state_lock);
1121 	if (unhashed)
1122 		destroy_unhashed_deleg(dp);
1123 }
1124 
1125 static void revoke_delegation(struct nfs4_delegation *dp)
1126 {
1127 	struct nfs4_client *clp = dp->dl_stid.sc_client;
1128 
1129 	WARN_ON(!list_empty(&dp->dl_recall_lru));
1130 
1131 	if (clp->cl_minorversion) {
1132 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1133 		refcount_inc(&dp->dl_stid.sc_count);
1134 		spin_lock(&clp->cl_lock);
1135 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1136 		spin_unlock(&clp->cl_lock);
1137 	}
1138 	destroy_unhashed_deleg(dp);
1139 }
1140 
1141 /*
1142  * SETCLIENTID state
1143  */
1144 
1145 static unsigned int clientid_hashval(u32 id)
1146 {
1147 	return id & CLIENT_HASH_MASK;
1148 }
1149 
1150 static unsigned int clientstr_hashval(struct xdr_netobj name)
1151 {
1152 	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1153 }
1154 
1155 /*
1156  * We store the NONE, READ, WRITE, and BOTH bits separately in the
1157  * st_{access,deny}_bmap field of the stateid, in order to track not
1158  * only what share bits are currently in force, but also what
1159  * combinations of share bits previous opens have used.  This allows us
1160  * to enforce the recommendation of rfc 3530 14.2.19 that the server
1161  * return an error if the client attempt to downgrade to a combination
1162  * of share bits not explicable by closing some of its previous opens.
1163  *
1164  * XXX: This enforcement is actually incomplete, since we don't keep
1165  * track of access/deny bit combinations; so, e.g., we allow:
1166  *
1167  *	OPEN allow read, deny write
1168  *	OPEN allow both, deny none
1169  *	DOWNGRADE allow read, deny none
1170  *
1171  * which we should reject.
1172  */
1173 static unsigned int
1174 bmap_to_share_mode(unsigned long bmap) {
1175 	int i;
1176 	unsigned int access = 0;
1177 
1178 	for (i = 1; i < 4; i++) {
1179 		if (test_bit(i, &bmap))
1180 			access |= i;
1181 	}
1182 	return access;
1183 }
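/*
 * Illustrative example: if previous opens used NFS4_SHARE_ACCESS_READ
 * (1) and NFS4_SHARE_ACCESS_BOTH (3), then
 *
 *	bmap_to_share_mode(BIT(1) | BIT(3)) == NFS4_SHARE_ACCESS_BOTH
 *
 * since 1 | 3 == 3.
 */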
1184 
1185 /* set share access for a given stateid */
1186 static inline void
1187 set_access(u32 access, struct nfs4_ol_stateid *stp)
1188 {
1189 	unsigned char mask = 1 << access;
1190 
1191 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1192 	stp->st_access_bmap |= mask;
1193 }
1194 
1195 /* clear share access for a given stateid */
1196 static inline void
1197 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1198 {
1199 	unsigned char mask = 1 << access;
1200 
1201 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1202 	stp->st_access_bmap &= ~mask;
1203 }
1204 
1205 /* test whether a given stateid has access */
1206 static inline bool
1207 test_access(u32 access, struct nfs4_ol_stateid *stp)
1208 {
1209 	unsigned char mask = 1 << access;
1210 
1211 	return (bool)(stp->st_access_bmap & mask);
1212 }
1213 
1214 /* set share deny for a given stateid */
1215 static inline void
1216 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1217 {
1218 	unsigned char mask = 1 << deny;
1219 
1220 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1221 	stp->st_deny_bmap |= mask;
1222 }
1223 
1224 /* clear share deny for a given stateid */
1225 static inline void
1226 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1227 {
1228 	unsigned char mask = 1 << deny;
1229 
1230 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1231 	stp->st_deny_bmap &= ~mask;
1232 }
1233 
1234 /* test whether a given stateid is denying specific access */
1235 static inline bool
1236 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1237 {
1238 	unsigned char mask = 1 << deny;
1239 
1240 	return (bool)(stp->st_deny_bmap & mask);
1241 }
1242 
1243 static int nfs4_access_to_omode(u32 access)
1244 {
1245 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
1246 	case NFS4_SHARE_ACCESS_READ:
1247 		return O_RDONLY;
1248 	case NFS4_SHARE_ACCESS_WRITE:
1249 		return O_WRONLY;
1250 	case NFS4_SHARE_ACCESS_BOTH:
1251 		return O_RDWR;
1252 	}
1253 	WARN_ON_ONCE(1);
1254 	return O_RDONLY;
1255 }
1256 
1257 /*
1258  * A stateid that had a deny mode associated with it is being released
1259  * or downgraded. Recalculate the deny mode on the file.
1260  */
1261 static void
1262 recalculate_deny_mode(struct nfs4_file *fp)
1263 {
1264 	struct nfs4_ol_stateid *stp;
1265 
1266 	spin_lock(&fp->fi_lock);
1267 	fp->fi_share_deny = 0;
1268 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1269 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1270 	spin_unlock(&fp->fi_lock);
1271 }
1272 
1273 static void
1274 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1275 {
1276 	int i;
1277 	bool change = false;
1278 
1279 	for (i = 1; i < 4; i++) {
1280 		if ((i & deny) != i) {
1281 			change = true;
1282 			clear_deny(i, stp);
1283 		}
1284 	}
1285 
1286 	/* Recalculate per-file deny mode if there was a change */
1287 	if (change)
1288 		recalculate_deny_mode(stp->st_stid.sc_file);
1289 }
1290 
1291 /* release all access and file references for a given stateid */
1292 static void
1293 release_all_access(struct nfs4_ol_stateid *stp)
1294 {
1295 	int i;
1296 	struct nfs4_file *fp = stp->st_stid.sc_file;
1297 
1298 	if (fp && stp->st_deny_bmap != 0)
1299 		recalculate_deny_mode(fp);
1300 
1301 	for (i = 1; i < 4; i++) {
1302 		if (test_access(i, stp))
1303 			nfs4_file_put_access(stp->st_stid.sc_file, i);
1304 		clear_access(i, stp);
1305 	}
1306 }
1307 
1308 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1309 {
1310 	kfree(sop->so_owner.data);
1311 	sop->so_ops->so_free(sop);
1312 }
1313 
1314 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1315 {
1316 	struct nfs4_client *clp = sop->so_client;
1317 
1318 	might_lock(&clp->cl_lock);
1319 
1320 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1321 		return;
1322 	sop->so_ops->so_unhash(sop);
1323 	spin_unlock(&clp->cl_lock);
1324 	nfs4_free_stateowner(sop);
1325 }
1326 
1327 static bool
1328 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1329 {
1330 	return list_empty(&stp->st_perfile);
1331 }
1332 
1333 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1334 {
1335 	struct nfs4_file *fp = stp->st_stid.sc_file;
1336 
1337 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1338 
1339 	if (list_empty(&stp->st_perfile))
1340 		return false;
1341 
1342 	spin_lock(&fp->fi_lock);
1343 	list_del_init(&stp->st_perfile);
1344 	spin_unlock(&fp->fi_lock);
1345 	list_del(&stp->st_perstateowner);
1346 	return true;
1347 }
1348 
1349 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1350 {
1351 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1352 
1353 	put_clnt_odstate(stp->st_clnt_odstate);
1354 	release_all_access(stp);
1355 	if (stp->st_stateowner)
1356 		nfs4_put_stateowner(stp->st_stateowner);
1357 	kmem_cache_free(stateid_slab, stid);
1358 }
1359 
1360 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1361 {
1362 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1363 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1364 	struct nfsd_file *nf;
1365 
1366 	nf = find_any_file(stp->st_stid.sc_file);
1367 	if (nf) {
1368 		get_file(nf->nf_file);
1369 		filp_close(nf->nf_file, (fl_owner_t)lo);
1370 		nfsd_file_put(nf);
1371 	}
1372 	nfs4_free_ol_stateid(stid);
1373 }
1374 
1375 /*
1376  * Put the persistent reference to an already unhashed generic stateid, while
1377  * holding the cl_lock. If it's the last reference, then put it onto the
1378  * reaplist for later destruction.
1379  */
1380 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1381 				       struct list_head *reaplist)
1382 {
1383 	struct nfs4_stid *s = &stp->st_stid;
1384 	struct nfs4_client *clp = s->sc_client;
1385 
1386 	lockdep_assert_held(&clp->cl_lock);
1387 
1388 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1389 
1390 	if (!refcount_dec_and_test(&s->sc_count)) {
1391 		wake_up_all(&close_wq);
1392 		return;
1393 	}
1394 
1395 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1396 	list_add(&stp->st_locks, reaplist);
1397 }
1398 
1399 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1400 {
1401 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1402 
1403 	if (!unhash_ol_stateid(stp))
1404 		return false;
1405 	list_del_init(&stp->st_locks);
1406 	nfs4_unhash_stid(&stp->st_stid);
1407 	return true;
1408 }
1409 
1410 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1411 {
1412 	struct nfs4_client *clp = stp->st_stid.sc_client;
1413 	bool unhashed;
1414 
1415 	spin_lock(&clp->cl_lock);
1416 	unhashed = unhash_lock_stateid(stp);
1417 	spin_unlock(&clp->cl_lock);
1418 	if (unhashed)
1419 		nfs4_put_stid(&stp->st_stid);
1420 }
1421 
1422 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1423 {
1424 	struct nfs4_client *clp = lo->lo_owner.so_client;
1425 
1426 	lockdep_assert_held(&clp->cl_lock);
1427 
1428 	list_del_init(&lo->lo_owner.so_strhash);
1429 }
1430 
1431 /*
1432  * Free a list of generic stateids that were collected earlier after being
1433  * fully unhashed.
1434  */
1435 static void
1436 free_ol_stateid_reaplist(struct list_head *reaplist)
1437 {
1438 	struct nfs4_ol_stateid *stp;
1439 	struct nfs4_file *fp;
1440 
1441 	might_sleep();
1442 
1443 	while (!list_empty(reaplist)) {
1444 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1445 				       st_locks);
1446 		list_del(&stp->st_locks);
1447 		fp = stp->st_stid.sc_file;
1448 		stp->st_stid.sc_free(&stp->st_stid);
1449 		if (fp)
1450 			put_nfs4_file(fp);
1451 	}
1452 }
1453 
1454 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1455 				       struct list_head *reaplist)
1456 {
1457 	struct nfs4_ol_stateid *stp;
1458 
1459 	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1460 
1461 	while (!list_empty(&open_stp->st_locks)) {
1462 		stp = list_entry(open_stp->st_locks.next,
1463 				struct nfs4_ol_stateid, st_locks);
1464 		WARN_ON(!unhash_lock_stateid(stp));
1465 		put_ol_stateid_locked(stp, reaplist);
1466 	}
1467 }
1468 
1469 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1470 				struct list_head *reaplist)
1471 {
1472 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1473 
1474 	if (!unhash_ol_stateid(stp))
1475 		return false;
1476 	release_open_stateid_locks(stp, reaplist);
1477 	return true;
1478 }
1479 
1480 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1481 {
1482 	LIST_HEAD(reaplist);
1483 
1484 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1485 	if (unhash_open_stateid(stp, &reaplist))
1486 		put_ol_stateid_locked(stp, &reaplist);
1487 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1488 	free_ol_stateid_reaplist(&reaplist);
1489 }
1490 
1491 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1492 {
1493 	struct nfs4_client *clp = oo->oo_owner.so_client;
1494 
1495 	lockdep_assert_held(&clp->cl_lock);
1496 
1497 	list_del_init(&oo->oo_owner.so_strhash);
1498 	list_del_init(&oo->oo_perclient);
1499 }
1500 
1501 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1502 {
1503 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1504 					  nfsd_net_id);
1505 	struct nfs4_ol_stateid *s;
1506 
1507 	spin_lock(&nn->client_lock);
1508 	s = oo->oo_last_closed_stid;
1509 	if (s) {
1510 		list_del_init(&oo->oo_close_lru);
1511 		oo->oo_last_closed_stid = NULL;
1512 	}
1513 	spin_unlock(&nn->client_lock);
1514 	if (s)
1515 		nfs4_put_stid(&s->st_stid);
1516 }
1517 
1518 static void release_openowner(struct nfs4_openowner *oo)
1519 {
1520 	struct nfs4_ol_stateid *stp;
1521 	struct nfs4_client *clp = oo->oo_owner.so_client;
1522 	struct list_head reaplist;
1523 
1524 	INIT_LIST_HEAD(&reaplist);
1525 
1526 	spin_lock(&clp->cl_lock);
1527 	unhash_openowner_locked(oo);
1528 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1529 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1530 				struct nfs4_ol_stateid, st_perstateowner);
1531 		if (unhash_open_stateid(stp, &reaplist))
1532 			put_ol_stateid_locked(stp, &reaplist);
1533 	}
1534 	spin_unlock(&clp->cl_lock);
1535 	free_ol_stateid_reaplist(&reaplist);
1536 	release_last_closed_stateid(oo);
1537 	nfs4_put_stateowner(&oo->oo_owner);
1538 }
1539 
1540 static inline int
1541 hash_sessionid(struct nfs4_sessionid *sessionid)
1542 {
1543 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1544 
1545 	return sid->sequence % SESSION_HASH_SIZE;
1546 }
1547 
1548 #ifdef CONFIG_SUNRPC_DEBUG
1549 static inline void
1550 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1551 {
1552 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1553 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1554 }
1555 #else
1556 static inline void
1557 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1558 {
1559 }
1560 #endif
1561 
1562 /*
1563  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1564  * won't be used for replay.
1565  */
1566 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1567 {
1568 	struct nfs4_stateowner *so = cstate->replay_owner;
1569 
1570 	if (nfserr == nfserr_replay_me)
1571 		return;
1572 
1573 	if (!seqid_mutating_err(ntohl(nfserr))) {
1574 		nfsd4_cstate_clear_replay(cstate);
1575 		return;
1576 	}
1577 	if (!so)
1578 		return;
1579 	if (so->so_is_open_owner)
1580 		release_last_closed_stateid(openowner(so));
1581 	so->so_seqid++;
1582 	return;
1583 }
1584 
1585 static void
1586 gen_sessionid(struct nfsd4_session *ses)
1587 {
1588 	struct nfs4_client *clp = ses->se_client;
1589 	struct nfsd4_sessionid *sid;
1590 
1591 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1592 	sid->clientid = clp->cl_clientid;
1593 	sid->sequence = current_sessionid++;
1594 	sid->reserved = 0;
1595 }
1596 
1597 /*
1598  * The protocol defines ca_maxresponsesize_cached to include the size of
1599  * the rpc header, but all we need to cache is the data starting after
1600  * the end of the initial SEQUENCE operation--the rest we regenerate
1601  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1602  * value that is the number of bytes in our cache plus a few additional
1603  * bytes.  In order to stay on the safe side, and not promise more than
1604  * we can cache, those additional bytes must be the minimum possible: 24
1605  * bytes of rpc header (xid through accept state, with AUTH_NULL
1606  * verifier), 12 for the compound header (with zero-length tag), and 44
1607  * for the SEQUENCE op response:
1608  */
1609 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
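/*
 * A hedged breakdown of the 44 bytes for the SEQUENCE reply, per its
 * XDR layout: opcode (4) + status (4) + sessionid (16) + sequenceid,
 * slotid, highest_slotid, target_highest_slotid and status_flags
 * (4 bytes each).
 */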
1610 
1611 static void
1612 free_session_slots(struct nfsd4_session *ses)
1613 {
1614 	int i;
1615 
1616 	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1617 		free_svc_cred(&ses->se_slots[i]->sl_cred);
1618 		kfree(ses->se_slots[i]);
1619 	}
1620 }
1621 
1622 /*
1623  * We don't actually need to cache the rpc and session headers, so we
1624  * can allocate a little less for each slot:
1625  */
1626 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1627 {
1628 	u32 size;
1629 
1630 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1631 		size = 0;
1632 	else
1633 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1634 	return size + sizeof(struct nfsd4_slot);
1635 }
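/*
 * Illustrative example: a session advertising maxresp_cached = 4096
 * needs 4096 - 80 bytes of per-slot reply cache, so each slot costs
 * 4016 bytes plus sizeof(struct nfsd4_slot).
 */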
1636 
1637 /*
1638  * XXX: If we run out of reserved DRC memory we could (up to a point)
1639  * re-negotiate active sessions and reduce their slot usage to make
1640  * room for new connections. For now we just fail the create session.
1641  */
1642 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1643 {
1644 	u32 slotsize = slot_bytes(ca);
1645 	u32 num = ca->maxreqs;
1646 	unsigned long avail, total_avail;
1647 	unsigned int scale_factor;
1648 
1649 	spin_lock(&nfsd_drc_lock);
1650 	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1651 		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1652 	else
1653 		/* We have handed out more space than we chose in
1654 		 * set_max_drc() to allow.  That isn't really a
1655 		 * problem as long as that doesn't make us think we
1656 		 * have lots more due to integer overflow.
1657 		 */
1658 		total_avail = 0;
1659 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1660 	/*
1661 	 * Never use more than a fraction of the remaining memory,
1662 	 * unless it's the only way to give this client a slot.
1663 	 * The chosen fraction is either 1/8 or 1/number of threads,
1664 	 * whichever is smaller.  This ensures there are adequate
1665 	 * slots to support multiple clients per thread.
1666 	 * Give the client one slot even if that would require
1667 	 * over-allocation--it is better than failure.
1668 	 */
1669 	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1670 
1671 	avail = clamp_t(unsigned long, avail, slotsize,
1672 			total_avail/scale_factor);
1673 	num = min_t(int, num, avail / slotsize);
1674 	num = max_t(int, num, 1);
1675 	nfsd_drc_mem_used += num * slotsize;
1676 	spin_unlock(&nfsd_drc_lock);
1677 
1678 	return num;
1679 }
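/*
 * Worked example (illustrative numbers, ignoring the per-session
 * cap): with total_avail = 8 MB and 16 nfsd threads, scale_factor is
 * 16 and avail is clamped to 512 KB; with 2 KB slots a client asking
 * for 1024 slots is trimmed to 256, while one asking for 4 keeps 4.
 */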
1680 
1681 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1682 {
1683 	int slotsize = slot_bytes(ca);
1684 
1685 	spin_lock(&nfsd_drc_lock);
1686 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1687 	spin_unlock(&nfsd_drc_lock);
1688 }
1689 
1690 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1691 					   struct nfsd4_channel_attrs *battrs)
1692 {
1693 	int numslots = fattrs->maxreqs;
1694 	int slotsize = slot_bytes(fattrs);
1695 	struct nfsd4_session *new;
1696 	int mem, i;
1697 
1698 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1699 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1700 	mem = numslots * sizeof(struct nfsd4_slot *);
1701 
1702 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1703 	if (!new)
1704 		return NULL;
1705 	/* allocate each struct nfsd4_slot and data cache in one piece */
1706 	for (i = 0; i < numslots; i++) {
1707 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1708 		if (!new->se_slots[i])
1709 			goto out_free;
1710 	}
1711 
1712 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1713 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1714 
1715 	return new;
1716 out_free:
1717 	while (i--)
1718 		kfree(new->se_slots[i]);
1719 	kfree(new);
1720 	return NULL;
1721 }
1722 
1723 static void free_conn(struct nfsd4_conn *c)
1724 {
1725 	svc_xprt_put(c->cn_xprt);
1726 	kfree(c);
1727 }
1728 
1729 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1730 {
1731 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1732 	struct nfs4_client *clp = c->cn_session->se_client;
1733 
1734 	spin_lock(&clp->cl_lock);
1735 	if (!list_empty(&c->cn_persession)) {
1736 		list_del(&c->cn_persession);
1737 		free_conn(c);
1738 	}
1739 	nfsd4_probe_callback(clp);
1740 	spin_unlock(&clp->cl_lock);
1741 }
1742 
1743 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1744 {
1745 	struct nfsd4_conn *conn;
1746 
1747 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1748 	if (!conn)
1749 		return NULL;
1750 	svc_xprt_get(rqstp->rq_xprt);
1751 	conn->cn_xprt = rqstp->rq_xprt;
1752 	conn->cn_flags = flags;
1753 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1754 	return conn;
1755 }
1756 
1757 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1758 {
1759 	conn->cn_session = ses;
1760 	list_add(&conn->cn_persession, &ses->se_conns);
1761 }
1762 
1763 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1764 {
1765 	struct nfs4_client *clp = ses->se_client;
1766 
1767 	spin_lock(&clp->cl_lock);
1768 	__nfsd4_hash_conn(conn, ses);
1769 	spin_unlock(&clp->cl_lock);
1770 }
1771 
1772 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1773 {
1774 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1775 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1776 }
1777 
1778 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1779 {
1780 	int ret;
1781 
1782 	nfsd4_hash_conn(conn, ses);
1783 	ret = nfsd4_register_conn(conn);
1784 	if (ret)
1785 		/* oops; xprt is already down: */
1786 		nfsd4_conn_lost(&conn->cn_xpt_user);
1787 	/* We may have gained or lost a callback channel: */
1788 	nfsd4_probe_callback_sync(ses->se_client);
1789 }
1790 
1791 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1792 {
1793 	u32 dir = NFS4_CDFC4_FORE;
1794 
1795 	if (cses->flags & SESSION4_BACK_CHAN)
1796 		dir |= NFS4_CDFC4_BACK;
1797 	return alloc_conn(rqstp, dir);
1798 }
1799 
1800 /* must be called under client_lock */
1801 static void nfsd4_del_conns(struct nfsd4_session *s)
1802 {
1803 	struct nfs4_client *clp = s->se_client;
1804 	struct nfsd4_conn *c;
1805 
1806 	spin_lock(&clp->cl_lock);
1807 	while (!list_empty(&s->se_conns)) {
1808 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1809 		list_del_init(&c->cn_persession);
1810 		spin_unlock(&clp->cl_lock);
1811 
1812 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1813 		free_conn(c);
1814 
1815 		spin_lock(&clp->cl_lock);
1816 	}
1817 	spin_unlock(&clp->cl_lock);
1818 }
1819 
1820 static void __free_session(struct nfsd4_session *ses)
1821 {
1822 	free_session_slots(ses);
1823 	kfree(ses);
1824 }
1825 
1826 static void free_session(struct nfsd4_session *ses)
1827 {
1828 	nfsd4_del_conns(ses);
1829 	nfsd4_put_drc_mem(&ses->se_fchannel);
1830 	__free_session(ses);
1831 }
1832 
1833 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1834 {
1835 	int idx;
1836 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1837 
1838 	new->se_client = clp;
1839 	gen_sessionid(new);
1840 
1841 	INIT_LIST_HEAD(&new->se_conns);
1842 
1843 	new->se_cb_seq_nr = 1;
1844 	new->se_flags = cses->flags;
1845 	new->se_cb_prog = cses->callback_prog;
1846 	new->se_cb_sec = cses->cb_sec;
1847 	atomic_set(&new->se_ref, 0);
1848 	idx = hash_sessionid(&new->se_sessionid);
1849 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1850 	spin_lock(&clp->cl_lock);
1851 	list_add(&new->se_perclnt, &clp->cl_sessions);
1852 	spin_unlock(&clp->cl_lock);
1853 
1854 	{
1855 		struct sockaddr *sa = svc_addr(rqstp);
1856 		/*
1857 		 * This is a little silly; with sessions there's no real
1858 		 * use for the callback address.  Use the peer address
1859 		 * as a reasonable default for now, but consider fixing
1860 		 * the rpc client not to require an address in the
1861 		 * future:
1862 		 */
1863 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1864 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1865 	}
1866 }
1867 
1868 /* caller must hold client_lock */
1869 static struct nfsd4_session *
1870 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1871 {
1872 	struct nfsd4_session *elem;
1873 	int idx;
1874 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1875 
1876 	lockdep_assert_held(&nn->client_lock);
1877 
1878 	dump_sessionid(__func__, sessionid);
1879 	idx = hash_sessionid(sessionid);
1880 	/* Search in the appropriate list */
1881 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1882 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1883 			    NFS4_MAX_SESSIONID_LEN)) {
1884 			return elem;
1885 		}
1886 	}
1887 
1888 	dprintk("%s: session not found\n", __func__);
1889 	return NULL;
1890 }
1891 
1892 static struct nfsd4_session *
1893 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1894 		__be32 *ret)
1895 {
1896 	struct nfsd4_session *session;
1897 	__be32 status = nfserr_badsession;
1898 
1899 	session = __find_in_sessionid_hashtbl(sessionid, net);
1900 	if (!session)
1901 		goto out;
1902 	status = nfsd4_get_session_locked(session);
1903 	if (status)
1904 		session = NULL;
1905 out:
1906 	*ret = status;
1907 	return session;
1908 }
1909 
1910 /* caller must hold client_lock */
1911 static void
1912 unhash_session(struct nfsd4_session *ses)
1913 {
1914 	struct nfs4_client *clp = ses->se_client;
1915 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1916 
1917 	lockdep_assert_held(&nn->client_lock);
1918 
1919 	list_del(&ses->se_hash);
1920 	spin_lock(&ses->se_client->cl_lock);
1921 	list_del(&ses->se_perclnt);
1922 	spin_unlock(&ses->se_client->cl_lock);
1923 }
1924 
1925 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1926 static int
1927 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1928 {
1929 	/*
1930 	 * We're assuming the clid was not given out from a boot
1931 	 * precisely 2^32 seconds (about 136 years) before this one.  That seems
1932 	 * a safe assumption:
1933 	 */
1934 	if (clid->cl_boot == (u32)nn->boot_time)
1935 		return 0;
1936 	trace_nfsd_clid_stale(clid);
1937 	return 1;
1938 }
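/*
 * Illustrative sketch: gen_clid() below stamps each clientid with the
 * server's boot time, so after a reboot an old clientid fails the
 * check above because
 *
 *	clid->cl_boot == (u32)old_boot_time != (u32)nn->boot_time
 *
 * and the client is forced to establish fresh state.
 */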
1939 
1940 /*
1941  * XXX Should we use a slab cache ?
1942  * This type of memory management is somewhat inefficient, but we use it
1943  * anyway since SETCLIENTID is not a common operation.
1944  */
1945 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1946 {
1947 	struct nfs4_client *clp;
1948 	int i;
1949 
1950 	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1951 	if (clp == NULL)
1952 		return NULL;
1953 	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1954 	if (clp->cl_name.data == NULL)
1955 		goto err_no_name;
1956 	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1957 						 sizeof(struct list_head),
1958 						 GFP_KERNEL);
1959 	if (!clp->cl_ownerstr_hashtbl)
1960 		goto err_no_hashtbl;
1961 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1962 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1963 	INIT_LIST_HEAD(&clp->cl_sessions);
1964 	idr_init(&clp->cl_stateids);
1965 	atomic_set(&clp->cl_rpc_users, 0);
1966 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1967 	INIT_LIST_HEAD(&clp->cl_idhash);
1968 	INIT_LIST_HEAD(&clp->cl_openowners);
1969 	INIT_LIST_HEAD(&clp->cl_delegations);
1970 	INIT_LIST_HEAD(&clp->cl_lru);
1971 	INIT_LIST_HEAD(&clp->cl_revoked);
1972 #ifdef CONFIG_NFSD_PNFS
1973 	INIT_LIST_HEAD(&clp->cl_lo_states);
1974 #endif
1975 	INIT_LIST_HEAD(&clp->async_copies);
1976 	spin_lock_init(&clp->async_lock);
1977 	spin_lock_init(&clp->cl_lock);
1978 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1979 	return clp;
1980 err_no_hashtbl:
1981 	kfree(clp->cl_name.data);
1982 err_no_name:
1983 	kmem_cache_free(client_slab, clp);
1984 	return NULL;
1985 }
1986 
1987 static void __free_client(struct kref *k)
1988 {
1989 	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
1990 	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
1991 
1992 	free_svc_cred(&clp->cl_cred);
1993 	kfree(clp->cl_ownerstr_hashtbl);
1994 	kfree(clp->cl_name.data);
1995 	kfree(clp->cl_nii_domain.data);
1996 	kfree(clp->cl_nii_name.data);
1997 	idr_destroy(&clp->cl_stateids);
1998 	kmem_cache_free(client_slab, clp);
1999 }
2000 
2001 static void drop_client(struct nfs4_client *clp)
2002 {
2003 	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2004 }
2005 
2006 static void
2007 free_client(struct nfs4_client *clp)
2008 {
2009 	while (!list_empty(&clp->cl_sessions)) {
2010 		struct nfsd4_session *ses;
2011 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2012 				se_perclnt);
2013 		list_del(&ses->se_perclnt);
2014 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
2015 		free_session(ses);
2016 	}
2017 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2018 	if (clp->cl_nfsd_dentry) {
2019 		nfsd_client_rmdir(clp->cl_nfsd_dentry);
2020 		clp->cl_nfsd_dentry = NULL;
2021 		wake_up_all(&expiry_wq);
2022 	}
2023 	drop_client(clp);
2024 }
2025 
2026 /* must be called under the client_lock */
2027 static void
2028 unhash_client_locked(struct nfs4_client *clp)
2029 {
2030 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2031 	struct nfsd4_session *ses;
2032 
2033 	lockdep_assert_held(&nn->client_lock);
2034 
2035 	/* Mark the client as expired! */
2036 	clp->cl_time = 0;
2037 	/* Make it invisible */
2038 	if (!list_empty(&clp->cl_idhash)) {
2039 		list_del_init(&clp->cl_idhash);
2040 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2041 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2042 		else
2043 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2044 	}
2045 	list_del_init(&clp->cl_lru);
2046 	spin_lock(&clp->cl_lock);
2047 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2048 		list_del_init(&ses->se_hash);
2049 	spin_unlock(&clp->cl_lock);
2050 }
2051 
2052 static void
2053 unhash_client(struct nfs4_client *clp)
2054 {
2055 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2056 
2057 	spin_lock(&nn->client_lock);
2058 	unhash_client_locked(clp);
2059 	spin_unlock(&nn->client_lock);
2060 }
2061 
2062 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2063 {
2064 	if (atomic_read(&clp->cl_rpc_users))
2065 		return nfserr_jukebox;
2066 	unhash_client_locked(clp);
2067 	return nfs_ok;
2068 }
2069 
2070 static void
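/*
 * Tear down all state held by a client, in dependency order:
 * delegations are unhashed under the state_lock and destroyed outside
 * it, then revoked delegations are put, openowners (and their
 * stateids) are released, blocked lock notifications are removed, and
 * finally layouts, async copies and the callback channel are shut
 * down before the client itself is freed.
 */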
2071 __destroy_client(struct nfs4_client *clp)
2072 {
2073 	int i;
2074 	struct nfs4_openowner *oo;
2075 	struct nfs4_delegation *dp;
2076 	struct list_head reaplist;
2077 
2078 	INIT_LIST_HEAD(&reaplist);
2079 	spin_lock(&state_lock);
2080 	while (!list_empty(&clp->cl_delegations)) {
2081 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2082 		WARN_ON(!unhash_delegation_locked(dp));
2083 		list_add(&dp->dl_recall_lru, &reaplist);
2084 	}
2085 	spin_unlock(&state_lock);
2086 	while (!list_empty(&reaplist)) {
2087 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2088 		list_del_init(&dp->dl_recall_lru);
2089 		destroy_unhashed_deleg(dp);
2090 	}
2091 	while (!list_empty(&clp->cl_revoked)) {
2092 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2093 		list_del_init(&dp->dl_recall_lru);
2094 		nfs4_put_stid(&dp->dl_stid);
2095 	}
2096 	while (!list_empty(&clp->cl_openowners)) {
2097 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2098 		nfs4_get_stateowner(&oo->oo_owner);
2099 		release_openowner(oo);
2100 	}
2101 	for (i = 0; i < OWNER_HASH_SIZE; i++) {
2102 		struct nfs4_stateowner *so, *tmp;
2103 
2104 		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2105 					 so_strhash) {
2106 			/* Should be no openowners at this point */
2107 			WARN_ON_ONCE(so->so_is_open_owner);
2108 			remove_blocked_locks(lockowner(so));
2109 		}
2110 	}
2111 	nfsd4_return_all_client_layouts(clp);
2112 	nfsd4_shutdown_copy(clp);
2113 	nfsd4_shutdown_callback(clp);
2114 	if (clp->cl_cb_conn.cb_xprt)
2115 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2116 	free_client(clp);
2117 	wake_up_all(&expiry_wq);
2118 }
2119 
2120 static void
2121 destroy_client(struct nfs4_client *clp)
2122 {
2123 	unhash_client(clp);
2124 	__destroy_client(clp);
2125 }
2126 
2127 static void inc_reclaim_complete(struct nfs4_client *clp)
2128 {
2129 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2130 
2131 	if (!nn->track_reclaim_completes)
2132 		return;
2133 	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2134 		return;
2135 	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2136 			nn->reclaim_str_hashtbl_size) {
2137 		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2138 				clp->net->ns.inum);
2139 		nfsd4_end_grace(nn);
2140 	}
2141 }
2142 
2143 static void expire_client(struct nfs4_client *clp)
2144 {
2145 	unhash_client(clp);
2146 	nfsd4_client_record_remove(clp);
2147 	__destroy_client(clp);
2148 }
2149 
2150 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2151 {
2152 	memcpy(target->cl_verifier.data, source->data,
2153 			sizeof(target->cl_verifier.data));
2154 }
2155 
2156 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2157 {
2158 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2159 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2160 }
2161 
2162 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2163 {
2164 	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2165 	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2166 								GFP_KERNEL);
2167 	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2168 	if ((source->cr_principal && !target->cr_principal) ||
2169 	    (source->cr_raw_principal && !target->cr_raw_principal) ||
2170 	    (source->cr_targ_princ && !target->cr_targ_princ))
2171 		return -ENOMEM;
2172 
2173 	target->cr_flavor = source->cr_flavor;
2174 	target->cr_uid = source->cr_uid;
2175 	target->cr_gid = source->cr_gid;
2176 	target->cr_group_info = source->cr_group_info;
2177 	get_group_info(target->cr_group_info);
2178 	target->cr_gss_mech = source->cr_gss_mech;
2179 	if (source->cr_gss_mech)
2180 		gss_mech_get(source->cr_gss_mech);
2181 	return 0;
2182 }
2183 
2184 static int
2185 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2186 {
2187 	if (o1->len < o2->len)
2188 		return -1;
2189 	if (o1->len > o2->len)
2190 		return 1;
2191 	return memcmp(o1->data, o2->data, o1->len);
2192 }
2193 
2194 static int
2195 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2196 {
2197 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2198 }
2199 
2200 static int
2201 same_clid(clientid_t *cl1, clientid_t *cl2)
2202 {
2203 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2204 }
2205 
2206 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2207 {
2208 	int i;
2209 
2210 	if (g1->ngroups != g2->ngroups)
2211 		return false;
2212 	for (i=0; i<g1->ngroups; i++)
2213 		if (!gid_eq(g1->gid[i], g2->gid[i]))
2214 			return false;
2215 	return true;
2216 }
2217 
2218 /*
2219  * RFC 3530 language requires clid_inuse be returned when the
2220  * "principal" associated with a request differs from the one previously
2221  * used.  We use the uid, gids, and gss principal string as our best
2222  * approximation.  We also don't want to allow non-gss use of a client
2223  * established using gss: in theory cr_principal should catch that
2224  * change, but in practice cr_principal can be null even in the gss case
2225  * since gssd doesn't always pass down a principal string.
2226  */
2227 static bool is_gss_cred(struct svc_cred *cr)
2228 {
2229 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
2230 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2231 }
2232 
2233 
2234 static bool
2235 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2236 {
2237 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2238 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2239 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2240 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2241 		return false;
2242 	/* XXX: check that cr_targ_princ fields match ? */
2243 	if (cr1->cr_principal == cr2->cr_principal)
2244 		return true;
2245 	if (!cr1->cr_principal || !cr2->cr_principal)
2246 		return false;
2247 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2248 }
2249 
2250 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2251 {
2252 	struct svc_cred *cr = &rqstp->rq_cred;
2253 	u32 service;
2254 
2255 	if (!cr->cr_gss_mech)
2256 		return false;
2257 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2258 	return service == RPC_GSS_SVC_INTEGRITY ||
2259 	       service == RPC_GSS_SVC_PRIVACY;
2260 }
2261 
2262 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2263 {
2264 	struct svc_cred *cr = &rqstp->rq_cred;
2265 
2266 	if (!cl->cl_mach_cred)
2267 		return true;
2268 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2269 		return false;
2270 	if (!svc_rqst_integrity_protected(rqstp))
2271 		return false;
2272 	if (cl->cl_cred.cr_raw_principal)
2273 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2274 						cr->cr_raw_principal);
2275 	if (!cr->cr_principal)
2276 		return false;
2277 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2278 }
2279 
2280 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2281 {
2282 	__be32 verf[2];
2283 
2284 	/*
2285 	 * This is opaque to the client, so no need to byte-swap. Use
2286 	 * __force to keep sparse happy
2287 	 */
2288 	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2289 	verf[1] = (__force __be32)nn->clverifier_counter++;
2290 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2291 }
2292 
2293 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2294 {
2295 	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2296 	clp->cl_clientid.cl_id = nn->clientid_counter++;
2297 	gen_confirm(clp, nn);
2298 }
2299 
2300 static struct nfs4_stid *
2301 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2302 {
2303 	struct nfs4_stid *ret;
2304 
2305 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2306 	if (!ret || !ret->sc_type)
2307 		return NULL;
2308 	return ret;
2309 }
2310 
2311 static struct nfs4_stid *
2312 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2313 {
2314 	struct nfs4_stid *s;
2315 
2316 	spin_lock(&cl->cl_lock);
2317 	s = find_stateid_locked(cl, t);
2318 	if (s != NULL) {
2319 		if (typemask & s->sc_type)
2320 			refcount_inc(&s->sc_count);
2321 		else
2322 			s = NULL;
2323 	}
2324 	spin_unlock(&cl->cl_lock);
2325 	return s;
2326 }
2327 
2328 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2329 {
2330 	struct nfsdfs_client *nc;
2331 	nc = get_nfsdfs_client(inode);
2332 	if (!nc)
2333 		return NULL;
2334 	return container_of(nc, struct nfs4_client, cl_nfsdfs);
2335 }
2336 
2337 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2338 {
2339 	seq_printf(m, "\"");
2340 	seq_escape_mem_ascii(m, data, len);
2341 	seq_printf(m, "\"");
2342 }
2343 
2344 static int client_info_show(struct seq_file *m, void *v)
2345 {
2346 	struct inode *inode = m->private;
2347 	struct nfs4_client *clp;
2348 	u64 clid;
2349 
2350 	clp = get_nfsdfs_clp(inode);
2351 	if (!clp)
2352 		return -ENXIO;
2353 	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2354 	seq_printf(m, "clientid: 0x%llx\n", clid);
2355 	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2356 	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2357 		seq_puts(m, "status: confirmed\n");
2358 	else
2359 		seq_puts(m, "status: unconfirmed\n");
2360 	seq_printf(m, "name: ");
2361 	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2362 	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2363 	if (clp->cl_nii_domain.data) {
2364 		seq_printf(m, "Implementation domain: ");
2365 		seq_quote_mem(m, clp->cl_nii_domain.data,
2366 					clp->cl_nii_domain.len);
2367 		seq_printf(m, "\nImplementation name: ");
2368 		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2369 		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2370 			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2371 	}
2372 	drop_client(clp);
2373 
2374 	return 0;
2375 }
2376 
2377 static int client_info_open(struct inode *inode, struct file *file)
2378 {
2379 	return single_open(file, client_info_show, inode);
2380 }
2381 
2382 static const struct file_operations client_info_fops = {
2383 	.open		= client_info_open,
2384 	.read		= seq_read,
2385 	.llseek		= seq_lseek,
2386 	.release	= single_release,
2387 };
2388 
2389 static void *states_start(struct seq_file *s, loff_t *pos)
2390 	__acquires(&clp->cl_lock)
2391 {
2392 	struct nfs4_client *clp = s->private;
2393 	unsigned long id = *pos;
2394 	void *ret;
2395 
2396 	spin_lock(&clp->cl_lock);
2397 	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2398 	*pos = id;
2399 	return ret;
2400 }
2401 
2402 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2403 {
2404 	struct nfs4_client *clp = s->private;
2405 	unsigned long id = *pos;
2406 	void *ret;
2407 
2409 	id++;
2410 	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2411 	*pos = id;
2412 	return ret;
2413 }
2414 
2415 static void states_stop(struct seq_file *s, void *v)
2416 	__releases(&clp->cl_lock)
2417 {
2418 	struct nfs4_client *clp = s->private;
2419 
2420 	spin_unlock(&clp->cl_lock);
2421 }
2422 
2423 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2424 {
2425 	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2426 }
2427 
2428 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2429 {
2430 	struct inode *inode = f->nf_inode;
2431 
2432 	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2433 					MAJOR(inode->i_sb->s_dev),
2434 					 MINOR(inode->i_sb->s_dev),
2435 					 inode->i_ino);
2436 }
2437 
2438 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2439 {
2440 	seq_printf(s, "owner: ");
2441 	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2442 }
2443 
2444 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2445 {
2446 	seq_printf(s, "0x%.8x", stid->si_generation);
2447 	seq_printf(s, "%12phN", &stid->si_opaque);
2448 }
2449 
2450 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2451 {
2452 	struct nfs4_ol_stateid *ols;
2453 	struct nfs4_file *nf;
2454 	struct nfsd_file *file;
2455 	struct nfs4_stateowner *oo;
2456 	unsigned int access, deny;
2457 
2458 	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2459 		return 0; /* XXX: or SEQ_SKIP? */
2460 	ols = openlockstateid(st);
2461 	oo = ols->st_stateowner;
2462 	nf = st->sc_file;
2463 	file = find_any_file(nf);
2464 	if (!file)
2465 		return 0;
2466 
2467 	seq_printf(s, "- ");
2468 	nfs4_show_stateid(s, &st->sc_stateid);
2469 	seq_printf(s, ": { type: open, ");
2470 
2471 	access = bmap_to_share_mode(ols->st_access_bmap);
2472 	deny   = bmap_to_share_mode(ols->st_deny_bmap);
2473 
2474 	seq_printf(s, "access: %s%s, ",
2475 		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2476 		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2477 	seq_printf(s, "deny: %s%s, ",
2478 		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2479 		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2480 
2481 	nfs4_show_superblock(s, file);
2482 	seq_printf(s, ", ");
2483 	nfs4_show_fname(s, file);
2484 	seq_printf(s, ", ");
2485 	nfs4_show_owner(s, oo);
2486 	seq_printf(s, " }\n");
2487 	nfsd_file_put(file);
2488 
2489 	return 0;
2490 }
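/*
 * Example of the resulting "states" line for an open stateid (a
 * sketch; the stateid, device numbers and names are invented, and the
 * real output is a single line):
 *
 * - 0x00000001 0f153cdd25ec4d580300aabb: { type: open, access: rw,
 *   deny: --, superblock: "08:02:12345", filename: "exports/f1",
 *   owner: "open id:0" }
 */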
2491 
2492 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2493 {
2494 	struct nfs4_ol_stateid *ols;
2495 	struct nfs4_file *nf;
2496 	struct nfsd_file *file;
2497 	struct nfs4_stateowner *oo;
2498 
2499 	ols = openlockstateid(st);
2500 	oo = ols->st_stateowner;
2501 	nf = st->sc_file;
2502 	file = find_any_file(nf);
2503 	if (!file)
2504 		return 0;
2505 
2506 	seq_printf(s, "- ");
2507 	nfs4_show_stateid(s, &st->sc_stateid);
2508 	seq_printf(s, ": { type: lock, ");
2509 
2510 	/*
2511 	 * Note: a lock stateid isn't really the same thing as a lock,
2512 	 * it's the locking state held by one owner on a file, and there
2513 	 * may be multiple (or no) lock ranges associated with it.
2514 	 * (The same is true of open stateids.)
2515 	 */
2516 
2517 	nfs4_show_superblock(s, file);
2518 	/* XXX: open stateid? */
2519 	seq_printf(s, ", ");
2520 	nfs4_show_fname(s, file);
2521 	seq_printf(s, ", ");
2522 	nfs4_show_owner(s, oo);
2523 	seq_printf(s, " }\n");
2524 	nfsd_file_put(file);
2525 
2526 	return 0;
2527 }
2528 
2529 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2530 {
2531 	struct nfs4_delegation *ds;
2532 	struct nfs4_file *nf;
2533 	struct nfsd_file *file;
2534 
2535 	ds = delegstateid(st);
2536 	nf = st->sc_file;
2537 	file = find_deleg_file(nf);
2538 	if (!file)
2539 		return 0;
2540 
2541 	seq_printf(s, "- ");
2542 	nfs4_show_stateid(s, &st->sc_stateid);
2543 	seq_printf(s, ": { type: deleg, ");
2544 
2545 	/* Kinda dead code as long as we only support read delegs: */
2546 	seq_printf(s, "access: %s, ",
2547 		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2548 
2549 	/* XXX: lease time, whether it's being recalled. */
2550 
2551 	nfs4_show_superblock(s, file);
2552 	seq_printf(s, ", ");
2553 	nfs4_show_fname(s, file);
2554 	seq_printf(s, " }\n");
2555 	nfsd_file_put(file);
2556 
2557 	return 0;
2558 }
2559 
2560 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2561 {
2562 	struct nfs4_layout_stateid *ls;
2563 	struct nfsd_file *file;
2564 
2565 	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2566 	file = ls->ls_file;
2567 
2568 	seq_printf(s, "- ");
2569 	nfs4_show_stateid(s, &st->sc_stateid);
2570 	seq_printf(s, ": { type: layout, ");
2571 
2572 	/* XXX: What else would be useful? */
2573 
2574 	nfs4_show_superblock(s, file);
2575 	seq_printf(s, ", ");
2576 	nfs4_show_fname(s, file);
2577 	seq_printf(s, " }\n");
2578 
2579 	return 0;
2580 }
2581 
2582 static int states_show(struct seq_file *s, void *v)
2583 {
2584 	struct nfs4_stid *st = v;
2585 
2586 	switch (st->sc_type) {
2587 	case NFS4_OPEN_STID:
2588 		return nfs4_show_open(s, st);
2589 	case NFS4_LOCK_STID:
2590 		return nfs4_show_lock(s, st);
2591 	case NFS4_DELEG_STID:
2592 		return nfs4_show_deleg(s, st);
2593 	case NFS4_LAYOUT_STID:
2594 		return nfs4_show_layout(s, st);
2595 	default:
2596 		return 0; /* XXX: or SEQ_SKIP? */
2597 	}
2598 	/* XXX: copy stateids? */
2599 }
2600 
2601 static struct seq_operations states_seq_ops = {
2602 	.start = states_start,
2603 	.next = states_next,
2604 	.stop = states_stop,
2605 	.show = states_show
2606 };
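/*
 * Locking note: states_start() takes clp->cl_lock and states_stop()
 * drops it, so the whole iteration above runs under a spinlock and the
 * ->show callbacks must not sleep.
 */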
2607 
2608 static int client_states_open(struct inode *inode, struct file *file)
2609 {
2610 	struct seq_file *s;
2611 	struct nfs4_client *clp;
2612 	int ret;
2613 
2614 	clp = get_nfsdfs_clp(inode);
2615 	if (!clp)
2616 		return -ENXIO;
2617 
2618 	ret = seq_open(file, &states_seq_ops);
2619 	if (ret)
2620 		return ret;
2621 	s = file->private_data;
2622 	s->private = clp;
2623 	return 0;
2624 }
2625 
2626 static int client_opens_release(struct inode *inode, struct file *file)
2627 {
2628 	struct seq_file *m = file->private_data;
2629 	struct nfs4_client *clp = m->private;
2630 
2631 	/* XXX: alternatively, we could get/drop in seq start/stop */
2632 	drop_client(clp);
2633 	return 0;
2634 }
2635 
2636 static const struct file_operations client_states_fops = {
2637 	.open		= client_states_open,
2638 	.read		= seq_read,
2639 	.llseek		= seq_lseek,
2640 	.release	= client_opens_release,
2641 };
2642 
2643 /*
2644  * Normally we refuse to destroy clients that are in use, but here the
2645  * administrator is telling us to just do it.  We also want to wait
2646  * so the caller has a guarantee that the client's locks are gone by
2647  * the time the write returns:
2648  */
2649 static void force_expire_client(struct nfs4_client *clp)
2650 {
2651 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2652 	bool already_expired;
2653 
2654 	spin_lock(&clp->cl_lock);
2655 	clp->cl_time = 0;
2656 	spin_unlock(&clp->cl_lock);
2657 
2658 	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2659 	spin_lock(&nn->client_lock);
2660 	already_expired = list_empty(&clp->cl_lru);
2661 	if (!already_expired)
2662 		unhash_client_locked(clp);
2663 	spin_unlock(&nn->client_lock);
2664 
2665 	if (!already_expired)
2666 		expire_client(clp);
2667 	else
2668 		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2669 }
2670 
2671 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2672 				   size_t size, loff_t *pos)
2673 {
2674 	char *data;
2675 	struct nfs4_client *clp;
2676 
2677 	data = simple_transaction_get(file, buf, size);
2678 	if (IS_ERR(data))
2679 		return PTR_ERR(data);
2680 	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2681 		return -EINVAL;
2682 	clp = get_nfsdfs_clp(file_inode(file));
2683 	if (!clp)
2684 		return -ENXIO;
2685 	force_expire_client(clp);
2686 	drop_client(clp);
2687 	return 7;
2688 }
2689 
2690 static const struct file_operations client_ctl_fops = {
2691 	.write		= client_ctl_write,
2692 	.release	= simple_transaction_release,
2693 };
2694 
2695 static const struct tree_descr client_files[] = {
2696 	[0] = {"info", &client_info_fops, S_IRUSR},
2697 	[1] = {"states", &client_states_fops, S_IRUSR},
2698 	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
2699 	[3] = {""},
2700 };
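/*
 * These files appear per-client under the nfsd filesystem (typically
 * mounted at /proc/fs/nfsd) as clients/<id>/{info,states,ctl}; the
 * mount point is a convention, not something enforced here.  An
 * administrator can force-expire a client by writing exactly
 * "expire\n" to the ctl file, e.g. (illustrative):
 *
 *	echo expire > /proc/fs/nfsd/clients/42/ctl
 */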
2701 
2702 static struct nfs4_client *create_client(struct xdr_netobj name,
2703 		struct svc_rqst *rqstp, nfs4_verifier *verf)
2704 {
2705 	struct nfs4_client *clp;
2706 	struct sockaddr *sa = svc_addr(rqstp);
2707 	int ret;
2708 	struct net *net = SVC_NET(rqstp);
2709 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2710 	struct dentry *dentries[ARRAY_SIZE(client_files)];
2711 
2712 	clp = alloc_client(name);
2713 	if (clp == NULL)
2714 		return NULL;
2715 
2716 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2717 	if (ret) {
2718 		free_client(clp);
2719 		return NULL;
2720 	}
2721 	gen_clid(clp, nn);
2722 	kref_init(&clp->cl_nfsdfs.cl_ref);
2723 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2724 	clp->cl_time = ktime_get_boottime_seconds();
2725 	clear_bit(0, &clp->cl_cb_slot_busy);
2726 	copy_verf(clp, verf);
2727 	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2728 	clp->cl_cb_session = NULL;
2729 	clp->net = net;
2730 	clp->cl_nfsd_dentry = nfsd_client_mkdir(
2731 		nn, &clp->cl_nfsdfs,
2732 		clp->cl_clientid.cl_id - nn->clientid_base,
2733 		client_files, dentries);
2734 	clp->cl_nfsd_info_dentry = dentries[0];
2735 	if (!clp->cl_nfsd_dentry) {
2736 		free_client(clp);
2737 		return NULL;
2738 	}
2739 	return clp;
2740 }
2741 
2742 static void
2743 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2744 {
2745 	struct rb_node **new = &(root->rb_node), *parent = NULL;
2746 	struct nfs4_client *clp;
2747 
2748 	while (*new) {
2749 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2750 		parent = *new;
2751 
2752 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2753 			new = &((*new)->rb_left);
2754 		else
2755 			new = &((*new)->rb_right);
2756 	}
2757 
2758 	rb_link_node(&new_clp->cl_namenode, parent, new);
2759 	rb_insert_color(&new_clp->cl_namenode, root);
2760 }
2761 
2762 static struct nfs4_client *
2763 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2764 {
2765 	int cmp;
2766 	struct rb_node *node = root->rb_node;
2767 	struct nfs4_client *clp;
2768 
2769 	while (node) {
2770 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2771 		cmp = compare_blob(&clp->cl_name, name);
2772 		if (cmp > 0)
2773 			node = node->rb_left;
2774 		else if (cmp < 0)
2775 			node = node->rb_right;
2776 		else
2777 			return clp;
2778 	}
2779 	return NULL;
2780 }
2781 
2782 static void
2783 add_to_unconfirmed(struct nfs4_client *clp)
2784 {
2785 	unsigned int idhashval;
2786 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2787 
2788 	lockdep_assert_held(&nn->client_lock);
2789 
2790 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2791 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2792 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2793 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2794 	renew_client_locked(clp);
2795 }
2796 
2797 static void
2798 move_to_confirmed(struct nfs4_client *clp)
2799 {
2800 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2801 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2802 
2803 	lockdep_assert_held(&nn->client_lock);
2804 
2805 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2806 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2807 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2808 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
2809 	if (!test_and_set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags) &&
2810 	    clp->cl_nfsd_dentry &&
2811 	    clp->cl_nfsd_info_dentry)
2812 		fsnotify_dentry(clp->cl_nfsd_info_dentry, FS_MODIFY);
2813 	renew_client_locked(clp);
2814 }
2815 
2816 static struct nfs4_client *
2817 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2818 {
2819 	struct nfs4_client *clp;
2820 	unsigned int idhashval = clientid_hashval(clid->cl_id);
2821 
2822 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2823 		if (same_clid(&clp->cl_clientid, clid)) {
2824 			if ((bool)clp->cl_minorversion != sessions)
2825 				return NULL;
2826 			renew_client_locked(clp);
2827 			return clp;
2828 		}
2829 	}
2830 	return NULL;
2831 }
2832 
2833 static struct nfs4_client *
2834 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2835 {
2836 	struct list_head *tbl = nn->conf_id_hashtbl;
2837 
2838 	lockdep_assert_held(&nn->client_lock);
2839 	return find_client_in_id_table(tbl, clid, sessions);
2840 }
2841 
2842 static struct nfs4_client *
2843 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2844 {
2845 	struct list_head *tbl = nn->unconf_id_hashtbl;
2846 
2847 	lockdep_assert_held(&nn->client_lock);
2848 	return find_client_in_id_table(tbl, clid, sessions);
2849 }
2850 
2851 static bool clp_used_exchangeid(struct nfs4_client *clp)
2852 {
2853 	return clp->cl_exchange_flags != 0;
2854 }
2855 
2856 static struct nfs4_client *
2857 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2858 {
2859 	lockdep_assert_held(&nn->client_lock);
2860 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
2861 }
2862 
2863 static struct nfs4_client *
2864 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2865 {
2866 	lockdep_assert_held(&nn->client_lock);
2867 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2868 }
2869 
2870 static void
2871 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2872 {
2873 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2874 	struct sockaddr	*sa = svc_addr(rqstp);
2875 	u32 scopeid = rpc_get_scope_id(sa);
2876 	unsigned short expected_family;
2877 
2878 	/* Currently, we only support tcp and tcp6 for the callback channel */
2879 	if (se->se_callback_netid_len == 3 &&
2880 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2881 		expected_family = AF_INET;
2882 	else if (se->se_callback_netid_len == 4 &&
2883 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2884 		expected_family = AF_INET6;
2885 	else
2886 		goto out_err;
2887 
2888 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2889 					    se->se_callback_addr_len,
2890 					    (struct sockaddr *)&conn->cb_addr,
2891 					    sizeof(conn->cb_addr));
2892 
2893 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2894 		goto out_err;
2895 
2896 	if (conn->cb_addr.ss_family == AF_INET6)
2897 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2898 
2899 	conn->cb_prog = se->se_callback_prog;
2900 	conn->cb_ident = se->se_callback_ident;
2901 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2902 	trace_nfsd_cb_args(clp, conn);
2903 	return;
2904 out_err:
2905 	conn->cb_addr.ss_family = AF_UNSPEC;
2906 	conn->cb_addrlen = 0;
2907 	trace_nfsd_cb_nodelegs(clp);
2908 	return;
2909 }
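/*
 * Illustrative example (values invented): a SETCLIENTID callback with
 * netid "tcp" and universal address "192.0.2.53.0.187" is parsed by
 * rpc_uaddr2sockaddr() into the IPv4 address 192.0.2.53 with port
 * 0 * 256 + 187 == 187.  Any netid/address-family mismatch takes the
 * out_err path above and simply disables the callback channel.
 */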
2910 
2911 /*
2912  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2913  */
2914 static void
2915 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2916 {
2917 	struct xdr_buf *buf = resp->xdr->buf;
2918 	struct nfsd4_slot *slot = resp->cstate.slot;
2919 	unsigned int base;
2920 
2921 	dprintk("--> %s slot %p\n", __func__, slot);
2922 
2923 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2924 	slot->sl_opcnt = resp->opcnt;
2925 	slot->sl_status = resp->cstate.status;
2926 	free_svc_cred(&slot->sl_cred);
2927 	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2928 
2929 	if (!nfsd4_cache_this(resp)) {
2930 		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2931 		return;
2932 	}
2933 	slot->sl_flags |= NFSD4_SLOT_CACHED;
2934 
2935 	base = resp->cstate.data_offset;
2936 	slot->sl_datalen = buf->len - base;
2937 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2938 		WARN(1, "%s: sessions DRC could not cache compound\n",
2939 		     __func__);
2940 	return;
2941 }
2942 
2943 /*
2944  * Encode the replay sequence operation from the slot values.
2945  * If cachethis is FALSE, encode the uncached-reply error on the next
2946  * operation, which sets resp->p and increments resp->opcnt for
2947  * nfs4svc_encode_compoundres.
2948  *
2949  */
2950 static __be32
2951 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2952 			  struct nfsd4_compoundres *resp)
2953 {
2954 	struct nfsd4_op *op;
2955 	struct nfsd4_slot *slot = resp->cstate.slot;
2956 
2957 	/* Encode the replayed sequence operation */
2958 	op = &args->ops[resp->opcnt - 1];
2959 	nfsd4_encode_operation(resp, op);
2960 
2961 	if (slot->sl_flags & NFSD4_SLOT_CACHED)
2962 		return op->status;
2963 	if (args->opcnt == 1) {
2964 		/*
2965 		 * The original operation wasn't a solo sequence--we
2966 		 * always cache those--so this retry must not match the
2967 		 * original:
2968 		 */
2969 		op->status = nfserr_seq_false_retry;
2970 	} else {
2971 		op = &args->ops[resp->opcnt++];
2972 		op->status = nfserr_retry_uncached_rep;
2973 		nfsd4_encode_operation(resp, op);
2974 	}
2975 	return op->status;
2976 }
2977 
2978 /*
2979  * The sequence operation is not cached because we can use the slot and
2980  * session values.
2981  */
2982 static __be32
2983 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2984 			 struct nfsd4_sequence *seq)
2985 {
2986 	struct nfsd4_slot *slot = resp->cstate.slot;
2987 	struct xdr_stream *xdr = resp->xdr;
2988 	__be32 *p;
2989 	__be32 status;
2990 
2991 	dprintk("--> %s slot %p\n", __func__, slot);
2992 
2993 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2994 	if (status)
2995 		return status;
2996 
2997 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2998 	if (!p) {
2999 		WARN_ON_ONCE(1);
3000 		return nfserr_serverfault;
3001 	}
3002 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3003 	xdr_commit_encode(xdr);
3004 
3005 	resp->opcnt = slot->sl_opcnt;
3006 	return slot->sl_status;
3007 }
3008 
3009 /*
3010  * Set the exchange_id flags returned by the server.
3011  */
3012 static void
3013 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3014 {
3015 #ifdef CONFIG_NFSD_PNFS
3016 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3017 #else
3018 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3019 #endif
3020 
3021 	/* Referrals are supported, Migration is not. */
3022 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3023 
3024 	/* set the wire flags to return to client. */
3025 	clid->flags = new->cl_exchange_flags;
3026 }
3027 
3028 static bool client_has_openowners(struct nfs4_client *clp)
3029 {
3030 	struct nfs4_openowner *oo;
3031 
3032 	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3033 		if (!list_empty(&oo->oo_owner.so_stateids))
3034 			return true;
3035 	}
3036 	return false;
3037 }
3038 
3039 static bool client_has_state(struct nfs4_client *clp)
3040 {
3041 	return client_has_openowners(clp)
3042 #ifdef CONFIG_NFSD_PNFS
3043 		|| !list_empty(&clp->cl_lo_states)
3044 #endif
3045 		|| !list_empty(&clp->cl_delegations)
3046 		|| !list_empty(&clp->cl_sessions)
3047 		|| !list_empty(&clp->async_copies);
3048 }
3049 
3050 static __be32 copy_impl_id(struct nfs4_client *clp,
3051 				struct nfsd4_exchange_id *exid)
3052 {
3053 	if (!exid->nii_domain.data)
3054 		return 0;
3055 	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3056 	if (!clp->cl_nii_domain.data)
3057 		return nfserr_jukebox;
3058 	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3059 	if (!clp->cl_nii_name.data)
3060 		return nfserr_jukebox;
3061 	clp->cl_nii_time = exid->nii_time;
3062 	return 0;
3063 }
3064 
3065 __be32
3066 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3067 		union nfsd4_op_u *u)
3068 {
3069 	struct nfsd4_exchange_id *exid = &u->exchange_id;
3070 	struct nfs4_client *conf, *new;
3071 	struct nfs4_client *unconf = NULL;
3072 	__be32 status;
3073 	char			addr_str[INET6_ADDRSTRLEN];
3074 	nfs4_verifier		verf = exid->verifier;
3075 	struct sockaddr		*sa = svc_addr(rqstp);
3076 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3077 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3078 
3079 	rpc_ntop(sa, addr_str, sizeof(addr_str));
3080 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3081 		"ip_addr=%s flags %x, spa_how %u\n",
3082 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
3083 		addr_str, exid->flags, exid->spa_how);
3084 
3085 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3086 		return nfserr_inval;
3087 
3088 	new = create_client(exid->clname, rqstp, &verf);
3089 	if (new == NULL)
3090 		return nfserr_jukebox;
3091 	status = copy_impl_id(new, exid);
3092 	if (status)
3093 		goto out_nolock;
3094 
3095 	switch (exid->spa_how) {
3096 	case SP4_MACH_CRED:
3097 		exid->spo_must_enforce[0] = 0;
3098 		exid->spo_must_enforce[1] = (
3099 			1 << (OP_BIND_CONN_TO_SESSION - 32) |
3100 			1 << (OP_EXCHANGE_ID - 32) |
3101 			1 << (OP_CREATE_SESSION - 32) |
3102 			1 << (OP_DESTROY_SESSION - 32) |
3103 			1 << (OP_DESTROY_CLIENTID - 32));
3104 
3105 		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3106 					1 << (OP_OPEN_DOWNGRADE) |
3107 					1 << (OP_LOCKU) |
3108 					1 << (OP_DELEGRETURN));
3109 
3110 		exid->spo_must_allow[1] &= (
3111 					1 << (OP_TEST_STATEID - 32) |
3112 					1 << (OP_FREE_STATEID - 32));
3113 		if (!svc_rqst_integrity_protected(rqstp)) {
3114 			status = nfserr_inval;
3115 			goto out_nolock;
3116 		}
3117 		/*
3118 		 * Sometimes userspace doesn't give us a principal.
3119 		 * Which is a bug, really.  Anyway, we can't enforce
3120 		 * MACH_CRED in that case, better to give up now:
3121 		 */
3122 		if (!new->cl_cred.cr_principal &&
3123 					!new->cl_cred.cr_raw_principal) {
3124 			status = nfserr_serverfault;
3125 			goto out_nolock;
3126 		}
3127 		new->cl_mach_cred = true;
		break;
3128 	case SP4_NONE:
3129 		break;
3130 	default:				/* checked by xdr code */
3131 		WARN_ON_ONCE(1);
3132 		fallthrough;
3133 	case SP4_SSV:
3134 		status = nfserr_encr_alg_unsupp;
3135 		goto out_nolock;
3136 	}
3137 
3138 	/* Cases below refer to rfc 5661 section 18.35.4: */
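	/*
	 * Rough map of the cases handled below (a sketch; RFC 5661
	 * section 18.35.4 is authoritative):
	 *   confirmed client found, update set:    cases 6, 8, 9
	 *   confirmed client found, update clear:  cases 2, 3, 5
	 *   no confirmed client, update set:       case 7
	 *   no confirmed client, update clear:     cases 1, 4
	 */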
3139 	spin_lock(&nn->client_lock);
3140 	conf = find_confirmed_client_by_name(&exid->clname, nn);
3141 	if (conf) {
3142 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3143 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3144 
3145 		if (update) {
3146 			if (!clp_used_exchangeid(conf)) { /* buggy client */
3147 				status = nfserr_inval;
3148 				goto out;
3149 			}
3150 			if (!nfsd4_mach_creds_match(conf, rqstp)) {
3151 				status = nfserr_wrong_cred;
3152 				goto out;
3153 			}
3154 			if (!creds_match) { /* case 9 */
3155 				status = nfserr_perm;
3156 				goto out;
3157 			}
3158 			if (!verfs_match) { /* case 8 */
3159 				status = nfserr_not_same;
3160 				goto out;
3161 			}
3162 			/* case 6 */
3163 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3164 			goto out_copy;
3165 		}
3166 		if (!creds_match) { /* case 3 */
3167 			if (client_has_state(conf)) {
3168 				status = nfserr_clid_inuse;
3169 				goto out;
3170 			}
3171 			goto out_new;
3172 		}
3173 		if (verfs_match) { /* case 2 */
3174 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3175 			goto out_copy;
3176 		}
3177 		/* case 5, client reboot */
3178 		conf = NULL;
3179 		goto out_new;
3180 	}
3181 
3182 	if (update) { /* case 7 */
3183 		status = nfserr_noent;
3184 		goto out;
3185 	}
3186 
3187 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
3188 	if (unconf) /* case 4, possible retry or client restart */
3189 		unhash_client_locked(unconf);
3190 
3191 	/* case 1 (normal case) */
3192 out_new:
3193 	if (conf) {
3194 		status = mark_client_expired_locked(conf);
3195 		if (status)
3196 			goto out;
3197 	}
3198 	new->cl_minorversion = cstate->minorversion;
3199 	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3200 	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3201 
3202 	add_to_unconfirmed(new);
3203 	swap(new, conf);
3204 out_copy:
3205 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3206 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
3207 
3208 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3209 	nfsd4_set_ex_flags(conf, exid);
3210 
3211 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3212 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3213 	status = nfs_ok;
3214 
3215 out:
3216 	spin_unlock(&nn->client_lock);
3217 out_nolock:
3218 	if (new)
3219 		expire_client(new);
3220 	if (unconf)
3221 		expire_client(unconf);
3222 	return status;
3223 }
3224 
3225 static __be32
3226 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3227 {
3228 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3229 		slot_seqid);
3230 
3231 	/* The slot is in use, and no response has been sent. */
3232 	if (slot_inuse) {
3233 		if (seqid == slot_seqid)
3234 			return nfserr_jukebox;
3235 		else
3236 			return nfserr_seq_misordered;
3237 	}
3238 	/* Note unsigned 32-bit arithmetic handles wraparound: */
3239 	if (likely(seqid == slot_seqid + 1))
3240 		return nfs_ok;
3241 	if (seqid == slot_seqid)
3242 		return nfserr_replay_cache;
3243 	return nfserr_seq_misordered;
3244 }
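/*
 * Worked example of the wraparound note above (illustrative): with
 * slot_seqid == 0xffffffff, "slot_seqid + 1" wraps to 0 in u32
 * arithmetic, so an incoming seqid of 0 is accepted as the next
 * request in sequence rather than rejected as misordered.
 */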
3245 
3246 /*
3247  * Cache the create_session result in the client's single create_session
3248  * DRC slot by saving the xdr structure. sl_seqid has already been set.
3249  * Do this for both solo and embedded create_session operations.
3250  */
3251 static void
3252 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3253 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
3254 {
3255 	slot->sl_status = nfserr;
3256 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3257 }
3258 
3259 static __be32
3260 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3261 			    struct nfsd4_clid_slot *slot)
3262 {
3263 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3264 	return slot->sl_status;
3265 }
3266 
3267 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
3268 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3269 			1 +	/* MIN tag: zero-length, so only the length word */ \
3270 			3 +	/* version, opcount, opcode */ \
3271 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3272 				/* seqid, slotID, slotID, cache */ \
3273 			4 ) * sizeof(__be32))
3274 
3275 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3276 			2 +	/* verifier: AUTH_NULL, length 0 */\
3277 			1 +	/* status */ \
3278 			1 +	/* MIN tag: zero-length, so only the length word */ \
3279 			3 +	/* opcount, opcode, opstatus*/ \
3280 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3281 				/* seqid, slotID, slotID, slotID, status */ \
3282 			5 ) * sizeof(__be32))
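/*
 * Worked sizes (a sketch, assuming NFS4_MAX_SESSIONID_LEN == 16 and
 * hence XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) == 4): the request minimum
 * is (2*2 + 1 + 3 + 4 + 4) * 4 == 64 bytes and the reply minimum is
 * (2 + 1 + 1 + 3 + 4 + 5) * 4 == 64 bytes.
 */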
3283 
3284 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3285 {
3286 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3287 
3288 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3289 		return nfserr_toosmall;
3290 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3291 		return nfserr_toosmall;
3292 	ca->headerpadsz = 0;
3293 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3294 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3295 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3296 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3297 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3298 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3299 	/*
3300 	 * Note decreasing slot size below client's request may make it
3301 	 * Note that decreasing the slot size below the client's request may
3302 	 * make it difficult for the client to function correctly, whereas
3303 	 * decreasing the number of slots will (just?) affect
3304 	 * performance.  When short on memory we therefore prefer to
3305 	 * decrease the number of slots instead of their size.  Clients that
3306 	 * request larger slots than they need will get poor results.
3307 	 * accounting is soft and provides no guarantees either way.
3308 	 */
3309 	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3310 
3311 	return nfs_ok;
3312 }
3313 
3314 /*
3315  * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3316  * These are based on similar macros in linux/sunrpc/msg_prot.h.
3317  */
3318 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3319 	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3320 
3321 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3322 	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3323 
3324 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
3325 				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3326 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
3327 				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3328 				 sizeof(__be32))
3329 
3330 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3331 {
3332 	ca->headerpadsz = 0;
3333 
3334 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3335 		return nfserr_toosmall;
3336 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3337 		return nfserr_toosmall;
3338 	ca->maxresp_cached = 0;
3339 	if (ca->maxops < 2)
3340 		return nfserr_toosmall;
3341 
3342 	return nfs_ok;
3343 }
3344 
3345 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3346 {
3347 	switch (cbs->flavor) {
3348 	case RPC_AUTH_NULL:
3349 	case RPC_AUTH_UNIX:
3350 		return nfs_ok;
3351 	default:
3352 		/*
3353 		 * GSS case: the spec doesn't allow us to return this
3354 		 * error.  But it also doesn't allow us not to support
3355 		 * GSS.
3356 		 * I'd rather this fail hard than return some error the
3357 		 * client might think it can already handle:
3358 		 */
3359 		return nfserr_encr_alg_unsupp;
3360 	}
3361 }
3362 
3363 __be32
3364 nfsd4_create_session(struct svc_rqst *rqstp,
3365 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3366 {
3367 	struct nfsd4_create_session *cr_ses = &u->create_session;
3368 	struct sockaddr *sa = svc_addr(rqstp);
3369 	struct nfs4_client *conf, *unconf;
3370 	struct nfs4_client *old = NULL;
3371 	struct nfsd4_session *new;
3372 	struct nfsd4_conn *conn;
3373 	struct nfsd4_clid_slot *cs_slot = NULL;
3374 	__be32 status = 0;
3375 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3376 
3377 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3378 		return nfserr_inval;
3379 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3380 	if (status)
3381 		return status;
3382 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3383 	if (status)
3384 		return status;
3385 	status = check_backchannel_attrs(&cr_ses->back_channel);
3386 	if (status)
3387 		goto out_release_drc_mem;
3388 	status = nfserr_jukebox;
3389 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3390 	if (!new)
3391 		goto out_release_drc_mem;
3392 	conn = alloc_conn_from_crses(rqstp, cr_ses);
3393 	if (!conn)
3394 		goto out_free_session;
3395 
3396 	spin_lock(&nn->client_lock);
3397 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3398 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3399 	WARN_ON_ONCE(conf && unconf);
3400 
3401 	if (conf) {
3402 		status = nfserr_wrong_cred;
3403 		if (!nfsd4_mach_creds_match(conf, rqstp))
3404 			goto out_free_conn;
3405 		cs_slot = &conf->cl_cs_slot;
3406 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3407 		if (status) {
3408 			if (status == nfserr_replay_cache)
3409 				status = nfsd4_replay_create_session(cr_ses, cs_slot);
3410 			goto out_free_conn;
3411 		}
3412 	} else if (unconf) {
3413 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3414 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3415 			status = nfserr_clid_inuse;
3416 			goto out_free_conn;
3417 		}
3418 		status = nfserr_wrong_cred;
3419 		if (!nfsd4_mach_creds_match(unconf, rqstp))
3420 			goto out_free_conn;
3421 		cs_slot = &unconf->cl_cs_slot;
3422 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3423 		if (status) {
3424 			/* an unconfirmed replay returns misordered */
3425 			status = nfserr_seq_misordered;
3426 			goto out_free_conn;
3427 		}
3428 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3429 		if (old) {
3430 			status = mark_client_expired_locked(old);
3431 			if (status) {
3432 				old = NULL;
3433 				goto out_free_conn;
3434 			}
3435 		}
3436 		move_to_confirmed(unconf);
3437 		conf = unconf;
3438 	} else {
3439 		status = nfserr_stale_clientid;
3440 		goto out_free_conn;
3441 	}
3442 	status = nfs_ok;
3443 	/* Persistent sessions are not supported */
3444 	cr_ses->flags &= ~SESSION4_PERSIST;
3445 	/* Upshifting from TCP to RDMA is not supported */
3446 	cr_ses->flags &= ~SESSION4_RDMA;
3447 
3448 	init_session(rqstp, new, conf, cr_ses);
3449 	nfsd4_get_session_locked(new);
3450 
3451 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3452 	       NFS4_MAX_SESSIONID_LEN);
3453 	cs_slot->sl_seqid++;
3454 	cr_ses->seqid = cs_slot->sl_seqid;
3455 
3456 	/* cache solo and embedded create sessions under the client_lock */
3457 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
3458 	spin_unlock(&nn->client_lock);
3459 	/* init connection and backchannel */
3460 	nfsd4_init_conn(rqstp, conn, new);
3461 	nfsd4_put_session(new);
3462 	if (old)
3463 		expire_client(old);
3464 	return status;
3465 out_free_conn:
3466 	spin_unlock(&nn->client_lock);
3467 	free_conn(conn);
3468 	if (old)
3469 		expire_client(old);
3470 out_free_session:
3471 	__free_session(new);
3472 out_release_drc_mem:
3473 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
3474 	return status;
3475 }
3476 
3477 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3478 {
3479 	switch (*dir) {
3480 	case NFS4_CDFC4_FORE:
3481 	case NFS4_CDFC4_BACK:
3482 		return nfs_ok;
3483 	case NFS4_CDFC4_FORE_OR_BOTH:
3484 	case NFS4_CDFC4_BACK_OR_BOTH:
3485 		*dir = NFS4_CDFC4_BOTH;
3486 		return nfs_ok;
3487 	}
3488 	return nfserr_inval;
3489 }
3490 
3491 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3492 		struct nfsd4_compound_state *cstate,
3493 		union nfsd4_op_u *u)
3494 {
3495 	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3496 	struct nfsd4_session *session = cstate->session;
3497 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3498 	__be32 status;
3499 
3500 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3501 	if (status)
3502 		return status;
3503 	spin_lock(&nn->client_lock);
3504 	session->se_cb_prog = bc->bc_cb_program;
3505 	session->se_cb_sec = bc->bc_cb_sec;
3506 	spin_unlock(&nn->client_lock);
3507 
3508 	nfsd4_probe_callback(session->se_client);
3509 
3510 	return nfs_ok;
3511 }
3512 
3513 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3514 {
3515 	struct nfsd4_conn *c;
3516 
3517 	list_for_each_entry(c, &s->se_conns, cn_persession) {
3518 		if (c->cn_xprt == xpt) {
3519 			return c;
3520 		}
3521 	}
3522 	return NULL;
3523 }
3524 
3525 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3526 				struct nfsd4_session *session, u32 req)
3527 {
3528 	struct nfs4_client *clp = session->se_client;
3529 	struct svc_xprt *xpt = rqst->rq_xprt;
3530 	struct nfsd4_conn *c;
3531 	__be32 status;
3532 
3533 	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
3534 	spin_lock(&clp->cl_lock);
3535 	c = __nfsd4_find_conn(xpt, session);
3536 	if (!c)
3537 		status = nfserr_noent;
3538 	else if (req == c->cn_flags)
3539 		status = nfs_ok;
3540 	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3541 				c->cn_flags != NFS4_CDFC4_BACK)
3542 		status = nfs_ok;
3543 	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3544 				c->cn_flags != NFS4_CDFC4_FORE)
3545 		status = nfs_ok;
3546 	else
3547 		status = nfserr_inval;
3548 	spin_unlock(&clp->cl_lock);
3549 	return status;
3550 }
3551 
3552 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3553 		     struct nfsd4_compound_state *cstate,
3554 		     union nfsd4_op_u *u)
3555 {
3556 	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3557 	__be32 status;
3558 	struct nfsd4_conn *conn;
3559 	struct nfsd4_session *session;
3560 	struct net *net = SVC_NET(rqstp);
3561 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3562 
3563 	if (!nfsd4_last_compound_op(rqstp))
3564 		return nfserr_not_only_op;
3565 	spin_lock(&nn->client_lock);
3566 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3567 	spin_unlock(&nn->client_lock);
3568 	if (!session)
3569 		goto out_no_session;
3570 	status = nfserr_wrong_cred;
3571 	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3572 		goto out;
3573 	status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
3574 	if (status == nfs_ok || status == nfserr_inval)
3575 		goto out;
3576 	status = nfsd4_map_bcts_dir(&bcts->dir);
3577 	if (status)
3578 		goto out;
3579 	conn = alloc_conn(rqstp, bcts->dir);
3580 	status = nfserr_jukebox;
3581 	if (!conn)
3582 		goto out;
3583 	nfsd4_init_conn(rqstp, conn, session);
3584 	status = nfs_ok;
3585 out:
3586 	nfsd4_put_session(session);
3587 out_no_session:
3588 	return status;
3589 }
3590 
3591 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3592 {
3593 	if (!cstate->session)
3594 		return false;
3595 	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3596 }
3597 
3598 __be32
3599 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3600 		union nfsd4_op_u *u)
3601 {
3602 	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3603 	struct nfsd4_session *ses;
3604 	__be32 status;
3605 	int ref_held_by_me = 0;
3606 	struct net *net = SVC_NET(r);
3607 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3608 
3609 	status = nfserr_not_only_op;
3610 	if (nfsd4_compound_in_session(cstate, sessionid)) {
3611 		if (!nfsd4_last_compound_op(r))
3612 			goto out;
3613 		ref_held_by_me++;
3614 	}
3615 	dump_sessionid(__func__, sessionid);
3616 	spin_lock(&nn->client_lock);
3617 	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3618 	if (!ses)
3619 		goto out_client_lock;
3620 	status = nfserr_wrong_cred;
3621 	if (!nfsd4_mach_creds_match(ses->se_client, r))
3622 		goto out_put_session;
3623 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3624 	if (status)
3625 		goto out_put_session;
3626 	unhash_session(ses);
3627 	spin_unlock(&nn->client_lock);
3628 
3629 	nfsd4_probe_callback_sync(ses->se_client);
3630 
3631 	spin_lock(&nn->client_lock);
3632 	status = nfs_ok;
3633 out_put_session:
3634 	nfsd4_put_session_locked(ses);
3635 out_client_lock:
3636 	spin_unlock(&nn->client_lock);
3637 out:
3638 	return status;
3639 }
3640 
3641 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3642 {
3643 	struct nfs4_client *clp = ses->se_client;
3644 	struct nfsd4_conn *c;
3645 	__be32 status = nfs_ok;
3646 	int ret;
3647 
3648 	spin_lock(&clp->cl_lock);
3649 	c = __nfsd4_find_conn(new->cn_xprt, ses);
3650 	if (c)
3651 		goto out_free;
3652 	status = nfserr_conn_not_bound_to_session;
3653 	if (clp->cl_mach_cred)
3654 		goto out_free;
3655 	__nfsd4_hash_conn(new, ses);
3656 	spin_unlock(&clp->cl_lock);
3657 	ret = nfsd4_register_conn(new);
3658 	if (ret)
3659 		/* oops; xprt is already down: */
3660 		nfsd4_conn_lost(&new->cn_xpt_user);
3661 	return nfs_ok;
3662 out_free:
3663 	spin_unlock(&clp->cl_lock);
3664 	free_conn(new);
3665 	return status;
3666 }
3667 
3668 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3669 {
3670 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
3671 
3672 	return args->opcnt > session->se_fchannel.maxops;
3673 }
3674 
3675 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3676 				  struct nfsd4_session *session)
3677 {
3678 	struct xdr_buf *xb = &rqstp->rq_arg;
3679 
3680 	return xb->len > session->se_fchannel.maxreq_sz;
3681 }
3682 
3683 static bool replay_matches_cache(struct svc_rqst *rqstp,
3684 		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3685 {
3686 	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3687 
3688 	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3689 	    (bool)seq->cachethis)
3690 		return false;
3691 	/*
3692 	 * If there's an error then the reply can have fewer ops than
3693 	 * the call.
3694 	 */
3695 	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3696 		return false;
3697 	/*
3698 	 * But if we cached a reply with *more* ops than the call you're
3699 	 * sending us now, then this new call is clearly not really a
3700 	 * replay of the old one:
3701 	 */
3702 	if (slot->sl_opcnt > argp->opcnt)
3703 		return false;
3704 	/* This is the only check explicitly called for by the spec: */
3705 	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3706 		return false;
3707 	/*
3708 	 * There may be more comparisons we could actually do, but the
3709 	 * spec doesn't require us to catch every case where the calls
3710 	 * don't match (that would require caching the call as well as
3711 	 * the reply), so we don't bother.
3712 	 */
3713 	return true;
3714 }
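
/*
 * A worked example of the checks above (illustrative only): suppose the
 * slot caches a 2-op reply with sl_status == 0, and the client now sends
 * a 3-op compound with the same slot and sequence id.  A genuine replay
 * can only be shorter than the call if the cached reply ended in an
 * error, so this cannot be a replay; replay_matches_cache() returns
 * false and the caller responds with nfserr_seq_false_retry.
 */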
3715 
3716 __be32
3717 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3718 		union nfsd4_op_u *u)
3719 {
3720 	struct nfsd4_sequence *seq = &u->sequence;
3721 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3722 	struct xdr_stream *xdr = resp->xdr;
3723 	struct nfsd4_session *session;
3724 	struct nfs4_client *clp;
3725 	struct nfsd4_slot *slot;
3726 	struct nfsd4_conn *conn;
3727 	__be32 status;
3728 	int buflen;
3729 	struct net *net = SVC_NET(rqstp);
3730 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3731 
3732 	if (resp->opcnt != 1)
3733 		return nfserr_sequence_pos;
3734 
3735 	/*
3736 	 * Will be either used or freed by nfsd4_sequence_check_conn
3737 	 * below.
3738 	 */
3739 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3740 	if (!conn)
3741 		return nfserr_jukebox;
3742 
3743 	spin_lock(&nn->client_lock);
3744 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3745 	if (!session)
3746 		goto out_no_session;
3747 	clp = session->se_client;
3748 
3749 	status = nfserr_too_many_ops;
3750 	if (nfsd4_session_too_many_ops(rqstp, session))
3751 		goto out_put_session;
3752 
3753 	status = nfserr_req_too_big;
3754 	if (nfsd4_request_too_big(rqstp, session))
3755 		goto out_put_session;
3756 
3757 	status = nfserr_badslot;
3758 	if (seq->slotid >= session->se_fchannel.maxreqs)
3759 		goto out_put_session;
3760 
3761 	slot = session->se_slots[seq->slotid];
3762 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
3763 
3764 	/* We do not negotiate the number of slots yet, so set the
3765 	 * maxslots to the session maxreqs, which is used to encode
3766 	 * sr_highest_slotid and the sr_target_slotid to maxslots */
3767 	seq->maxslots = session->se_fchannel.maxreqs;
3768 
3769 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3770 					slot->sl_flags & NFSD4_SLOT_INUSE);
3771 	if (status == nfserr_replay_cache) {
3772 		status = nfserr_seq_misordered;
3773 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3774 			goto out_put_session;
3775 		status = nfserr_seq_false_retry;
3776 		if (!replay_matches_cache(rqstp, seq, slot))
3777 			goto out_put_session;
3778 		cstate->slot = slot;
3779 		cstate->session = session;
3780 		cstate->clp = clp;
3781 		/* Return the cached reply status and set cstate->status
3782 		 * for nfsd4_proc_compound processing */
3783 		status = nfsd4_replay_cache_entry(resp, seq);
3784 		cstate->status = nfserr_replay_cache;
3785 		goto out;
3786 	}
3787 	if (status)
3788 		goto out_put_session;
3789 
3790 	status = nfsd4_sequence_check_conn(conn, session);
3791 	conn = NULL;
3792 	if (status)
3793 		goto out_put_session;
3794 
3795 	buflen = (seq->cachethis) ?
3796 			session->se_fchannel.maxresp_cached :
3797 			session->se_fchannel.maxresp_sz;
3798 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3799 				    nfserr_rep_too_big;
3800 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3801 		goto out_put_session;
3802 	svc_reserve(rqstp, buflen);
3803 
3804 	status = nfs_ok;
3805 	/* Success! bump slot seqid */
3806 	slot->sl_seqid = seq->seqid;
3807 	slot->sl_flags |= NFSD4_SLOT_INUSE;
3808 	if (seq->cachethis)
3809 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3810 	else
3811 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3812 
3813 	cstate->slot = slot;
3814 	cstate->session = session;
3815 	cstate->clp = clp;
3816 
3817 out:
3818 	switch (clp->cl_cb_state) {
3819 	case NFSD4_CB_DOWN:
3820 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3821 		break;
3822 	case NFSD4_CB_FAULT:
3823 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3824 		break;
3825 	default:
3826 		seq->status_flags = 0;
3827 	}
3828 	if (!list_empty(&clp->cl_revoked))
3829 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3830 out_no_session:
3831 	if (conn)
3832 		free_conn(conn);
3833 	spin_unlock(&nn->client_lock);
3834 	return status;
3835 out_put_session:
3836 	nfsd4_put_session_locked(session);
3837 	goto out_no_session;
3838 }
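
/*
 * For reference, check_slot_seqid() (defined earlier in this file)
 * distinguishes roughly these cases for a slot that is not in use:
 *
 *	seq->seqid == sl_seqid + 1	new request; proceed
 *	seq->seqid == sl_seqid		retransmission; replay from cache
 *	anything else			nfserr_seq_misordered
 *
 * which is what drives the nfserr_replay_cache handling above.
 */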
3839 
3840 void
3841 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3842 {
3843 	struct nfsd4_compound_state *cs = &resp->cstate;
3844 
3845 	if (nfsd4_has_session(cs)) {
3846 		if (cs->status != nfserr_replay_cache) {
3847 			nfsd4_store_cache_entry(resp);
3848 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3849 		}
3850 		/* Drop session reference that was taken in nfsd4_sequence() */
3851 		nfsd4_put_session(cs->session);
3852 	} else if (cs->clp)
3853 		put_client_renew(cs->clp);
3854 }
3855 
3856 __be32
3857 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3858 		struct nfsd4_compound_state *cstate,
3859 		union nfsd4_op_u *u)
3860 {
3861 	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3862 	struct nfs4_client *conf, *unconf;
3863 	struct nfs4_client *clp = NULL;
3864 	__be32 status = 0;
3865 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3866 
3867 	spin_lock(&nn->client_lock);
3868 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3869 	conf = find_confirmed_client(&dc->clientid, true, nn);
3870 	WARN_ON_ONCE(conf && unconf);
3871 
3872 	if (conf) {
3873 		if (client_has_state(conf)) {
3874 			status = nfserr_clientid_busy;
3875 			goto out;
3876 		}
3877 		status = mark_client_expired_locked(conf);
3878 		if (status)
3879 			goto out;
3880 		clp = conf;
3881 	} else if (unconf)
3882 		clp = unconf;
3883 	else {
3884 		status = nfserr_stale_clientid;
3885 		goto out;
3886 	}
3887 	if (!nfsd4_mach_creds_match(clp, rqstp)) {
3888 		clp = NULL;
3889 		status = nfserr_wrong_cred;
3890 		goto out;
3891 	}
3892 	unhash_client_locked(clp);
3893 out:
3894 	spin_unlock(&nn->client_lock);
3895 	if (clp)
3896 		expire_client(clp);
3897 	return status;
3898 }
3899 
3900 __be32
3901 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3902 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3903 {
3904 	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3905 	struct nfs4_client *clp = cstate->clp;
3906 	__be32 status = 0;
3907 
3908 	if (rc->rca_one_fs) {
3909 		if (!cstate->current_fh.fh_dentry)
3910 			return nfserr_nofilehandle;
3911 		/*
3912 		 * We don't take advantage of the rca_one_fs case.
3913 		 * That's OK, it's optional, we can safely ignore it.
3914 		 */
3915 		return nfs_ok;
3916 	}
3917 
3918 	status = nfserr_complete_already;
3919 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
3920 		goto out;
3921 
3922 	status = nfserr_stale_clientid;
3923 	if (is_client_expired(clp))
3924 		/*
3925 		 * The following error isn't really legal.
3926 		 * But we only get here if the client just explicitly
3927 		 * destroyed the client.  Surely it no longer cares what
3928 		 * error it gets back on an operation for the dead
3929 		 * client.
3930 		 */
3931 		goto out;
3932 
3933 	status = nfs_ok;
3934 	nfsd4_client_record_create(clp);
3935 	inc_reclaim_complete(clp);
3936 out:
3937 	return status;
3938 }
3939 
3940 __be32
3941 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3942 		  union nfsd4_op_u *u)
3943 {
3944 	struct nfsd4_setclientid *setclid = &u->setclientid;
3945 	struct xdr_netobj 	clname = setclid->se_name;
3946 	nfs4_verifier		clverifier = setclid->se_verf;
3947 	struct nfs4_client	*conf, *new;
3948 	struct nfs4_client	*unconf = NULL;
3949 	__be32 			status;
3950 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3951 
3952 	new = create_client(clname, rqstp, &clverifier);
3953 	if (new == NULL)
3954 		return nfserr_jukebox;
3955 	/* Cases below refer to rfc 3530 section 14.2.33: */
3956 	spin_lock(&nn->client_lock);
3957 	conf = find_confirmed_client_by_name(&clname, nn);
3958 	if (conf && client_has_state(conf)) {
3959 		/* case 0: */
3960 		status = nfserr_clid_inuse;
3961 		if (clp_used_exchangeid(conf))
3962 			goto out;
3963 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3964 			trace_nfsd_clid_inuse_err(conf);
3965 			goto out;
3966 		}
3967 	}
3968 	unconf = find_unconfirmed_client_by_name(&clname, nn);
3969 	if (unconf)
3970 		unhash_client_locked(unconf);
3971 	/* We need to handle only case 1: probable callback update */
3972 	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3973 		copy_clid(new, conf);
3974 		gen_confirm(new, nn);
3975 	}
3976 	new->cl_minorversion = 0;
3977 	gen_callback(new, setclid, rqstp);
3978 	add_to_unconfirmed(new);
3979 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3980 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3981 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3982 	new = NULL;
3983 	status = nfs_ok;
3984 out:
3985 	spin_unlock(&nn->client_lock);
3986 	if (new)
3987 		free_client(new);
3988 	if (unconf)
3989 		expire_client(unconf);
3990 	return status;
3991 }
3992 
3993 
3994 __be32
3995 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3996 			struct nfsd4_compound_state *cstate,
3997 			union nfsd4_op_u *u)
3998 {
3999 	struct nfsd4_setclientid_confirm *setclientid_confirm =
4000 			&u->setclientid_confirm;
4001 	struct nfs4_client *conf, *unconf;
4002 	struct nfs4_client *old = NULL;
4003 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4004 	clientid_t *clid = &setclientid_confirm->sc_clientid;
4005 	__be32 status;
4006 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4007 
4008 	if (STALE_CLIENTID(clid, nn))
4009 		return nfserr_stale_clientid;
4010 
4011 	spin_lock(&nn->client_lock);
4012 	conf = find_confirmed_client(clid, false, nn);
4013 	unconf = find_unconfirmed_client(clid, false, nn);
4014 	/*
4015 	 * We try hard to give out unique clientids, so if we get an
4016 	 * attempt to confirm the same clientid with a different cred,
4017 	 * the client may be buggy; this should never happen.
4018 	 *
4019 	 * Nevertheless, RFC 7530 recommends INUSE for this case:
4020 	 */
4021 	status = nfserr_clid_inuse;
4022 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
4023 		goto out;
4024 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
4025 		goto out;
4026 	/* cases below refer to rfc 3530 section 14.2.34: */
4027 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4028 		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4029 			/* case 2: probable retransmit */
4030 			status = nfs_ok;
4031 		} else /* case 4: client hasn't noticed we rebooted yet? */
4032 			status = nfserr_stale_clientid;
4033 		goto out;
4034 	}
4035 	status = nfs_ok;
4036 	if (conf) { /* case 1: callback update */
4037 		old = unconf;
4038 		unhash_client_locked(old);
4039 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4040 	} else { /* case 3: normal case; new or rebooted client */
4041 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4042 		if (old) {
4043 			status = nfserr_clid_inuse;
4044 			if (client_has_state(old)
4045 					&& !same_creds(&unconf->cl_cred,
4046 							&old->cl_cred))
4047 				goto out;
4048 			status = mark_client_expired_locked(old);
4049 			if (status) {
4050 				old = NULL;
4051 				goto out;
4052 			}
4053 		}
4054 		move_to_confirmed(unconf);
4055 		conf = unconf;
4056 	}
4057 	get_client_locked(conf);
4058 	spin_unlock(&nn->client_lock);
4059 	nfsd4_probe_callback(conf);
4060 	spin_lock(&nn->client_lock);
4061 	put_client_renew_locked(conf);
4062 out:
4063 	spin_unlock(&nn->client_lock);
4064 	if (old)
4065 		expire_client(old);
4066 	return status;
4067 }
4068 
4069 static struct nfs4_file *nfsd4_alloc_file(void)
4070 {
4071 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
4072 }
4073 
4074 /* OPEN Share state helper functions */
4075 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
4076 				struct nfs4_file *fp)
4077 {
4078 	lockdep_assert_held(&state_lock);
4079 
4080 	refcount_set(&fp->fi_ref, 1);
4081 	spin_lock_init(&fp->fi_lock);
4082 	INIT_LIST_HEAD(&fp->fi_stateids);
4083 	INIT_LIST_HEAD(&fp->fi_delegations);
4084 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4085 	fh_copy_shallow(&fp->fi_fhandle, fh);
4086 	fp->fi_deleg_file = NULL;
4087 	fp->fi_had_conflict = false;
4088 	fp->fi_share_deny = 0;
4089 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4090 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
4091 #ifdef CONFIG_NFSD_PNFS
4092 	INIT_LIST_HEAD(&fp->fi_lo_states);
4093 	atomic_set(&fp->fi_lo_recalls, 0);
4094 #endif
4095 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4096 }
4097 
4098 void
4099 nfsd4_free_slabs(void)
4100 {
4101 	kmem_cache_destroy(client_slab);
4102 	kmem_cache_destroy(openowner_slab);
4103 	kmem_cache_destroy(lockowner_slab);
4104 	kmem_cache_destroy(file_slab);
4105 	kmem_cache_destroy(stateid_slab);
4106 	kmem_cache_destroy(deleg_slab);
4107 	kmem_cache_destroy(odstate_slab);
4108 }
4109 
4110 int
4111 nfsd4_init_slabs(void)
4112 {
4113 	client_slab = kmem_cache_create("nfsd4_clients",
4114 			sizeof(struct nfs4_client), 0, 0, NULL);
4115 	if (client_slab == NULL)
4116 		goto out;
4117 	openowner_slab = kmem_cache_create("nfsd4_openowners",
4118 			sizeof(struct nfs4_openowner), 0, 0, NULL);
4119 	if (openowner_slab == NULL)
4120 		goto out_free_client_slab;
4121 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4122 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
4123 	if (lockowner_slab == NULL)
4124 		goto out_free_openowner_slab;
4125 	file_slab = kmem_cache_create("nfsd4_files",
4126 			sizeof(struct nfs4_file), 0, 0, NULL);
4127 	if (file_slab == NULL)
4128 		goto out_free_lockowner_slab;
4129 	stateid_slab = kmem_cache_create("nfsd4_stateids",
4130 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4131 	if (stateid_slab == NULL)
4132 		goto out_free_file_slab;
4133 	deleg_slab = kmem_cache_create("nfsd4_delegations",
4134 			sizeof(struct nfs4_delegation), 0, 0, NULL);
4135 	if (deleg_slab == NULL)
4136 		goto out_free_stateid_slab;
4137 	odstate_slab = kmem_cache_create("nfsd4_odstate",
4138 			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4139 	if (odstate_slab == NULL)
4140 		goto out_free_deleg_slab;
4141 	return 0;
4142 
4143 out_free_deleg_slab:
4144 	kmem_cache_destroy(deleg_slab);
4145 out_free_stateid_slab:
4146 	kmem_cache_destroy(stateid_slab);
4147 out_free_file_slab:
4148 	kmem_cache_destroy(file_slab);
4149 out_free_lockowner_slab:
4150 	kmem_cache_destroy(lockowner_slab);
4151 out_free_openowner_slab:
4152 	kmem_cache_destroy(openowner_slab);
4153 out_free_client_slab:
4154 	kmem_cache_destroy(client_slab);
4155 out:
4156 	return -ENOMEM;
4157 }
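
/*
 * The unwind labels above tear the caches down in the reverse order of
 * their creation, so a failure at any point frees exactly the caches
 * that were successfully created and nothing else.
 */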
4158 
4159 static void init_nfs4_replay(struct nfs4_replay *rp)
4160 {
4161 	rp->rp_status = nfserr_serverfault;
4162 	rp->rp_buflen = 0;
4163 	rp->rp_buf = rp->rp_ibuf;
4164 	mutex_init(&rp->rp_mutex);
4165 }
4166 
4167 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4168 		struct nfs4_stateowner *so)
4169 {
4170 	if (!nfsd4_has_session(cstate)) {
4171 		mutex_lock(&so->so_replay.rp_mutex);
4172 		cstate->replay_owner = nfs4_get_stateowner(so);
4173 	}
4174 }
4175 
4176 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4177 {
4178 	struct nfs4_stateowner *so = cstate->replay_owner;
4179 
4180 	if (so != NULL) {
4181 		cstate->replay_owner = NULL;
4182 		mutex_unlock(&so->so_replay.rp_mutex);
4183 		nfs4_put_stateowner(so);
4184 	}
4185 }
4186 
4187 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4188 {
4189 	struct nfs4_stateowner *sop;
4190 
4191 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
4192 	if (!sop)
4193 		return NULL;
4194 
4195 	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4196 	if (!sop->so_owner.data) {
4197 		kmem_cache_free(slab, sop);
4198 		return NULL;
4199 	}
4200 
4201 	INIT_LIST_HEAD(&sop->so_stateids);
4202 	sop->so_client = clp;
4203 	init_nfs4_replay(&sop->so_replay);
4204 	atomic_set(&sop->so_count, 1);
4205 	return sop;
4206 }
4207 
4208 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4209 {
4210 	lockdep_assert_held(&clp->cl_lock);
4211 
4212 	list_add(&oo->oo_owner.so_strhash,
4213 		 &clp->cl_ownerstr_hashtbl[strhashval]);
4214 	list_add(&oo->oo_perclient, &clp->cl_openowners);
4215 }
4216 
4217 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4218 {
4219 	unhash_openowner_locked(openowner(so));
4220 }
4221 
4222 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4223 {
4224 	struct nfs4_openowner *oo = openowner(so);
4225 
4226 	kmem_cache_free(openowner_slab, oo);
4227 }
4228 
4229 static const struct nfs4_stateowner_operations openowner_ops = {
4230 	.so_unhash =	nfs4_unhash_openowner,
4231 	.so_free =	nfs4_free_openowner,
4232 };
4233 
4234 static struct nfs4_ol_stateid *
4235 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4236 {
4237 	struct nfs4_ol_stateid *local, *ret = NULL;
4238 	struct nfs4_openowner *oo = open->op_openowner;
4239 
4240 	lockdep_assert_held(&fp->fi_lock);
4241 
4242 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4243 		/* ignore lock owners */
4244 		if (local->st_stateowner->so_is_open_owner == 0)
4245 			continue;
4246 		if (local->st_stateowner != &oo->oo_owner)
4247 			continue;
4248 		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4249 			ret = local;
4250 			refcount_inc(&ret->st_stid.sc_count);
4251 			break;
4252 		}
4253 	}
4254 	return ret;
4255 }
4256 
4257 static __be32
4258 nfsd4_verify_open_stid(struct nfs4_stid *s)
4259 {
4260 	__be32 ret = nfs_ok;
4261 
4262 	switch (s->sc_type) {
4263 	default:
4264 		break;
4265 	case 0:
4266 	case NFS4_CLOSED_STID:
4267 	case NFS4_CLOSED_DELEG_STID:
4268 		ret = nfserr_bad_stateid;
4269 		break;
4270 	case NFS4_REVOKED_DELEG_STID:
4271 		ret = nfserr_deleg_revoked;
4272 	}
4273 	return ret;
4274 }
4275 
4276 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4277 static __be32
4278 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4279 {
4280 	__be32 ret;
4281 
4282 	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4283 	ret = nfsd4_verify_open_stid(&stp->st_stid);
4284 	if (ret != nfs_ok)
4285 		mutex_unlock(&stp->st_mutex);
4286 	return ret;
4287 }
4288 
4289 static struct nfs4_ol_stateid *
4290 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4291 {
4292 	struct nfs4_ol_stateid *stp;
4293 	for (;;) {
4294 		spin_lock(&fp->fi_lock);
4295 		stp = nfsd4_find_existing_open(fp, open);
4296 		spin_unlock(&fp->fi_lock);
4297 		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4298 			break;
4299 		nfs4_put_stid(&stp->st_stid);
4300 	}
4301 	return stp;
4302 }
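
/*
 * The retry loop above exists because fi_lock must be dropped before
 * st_mutex can be taken (a mutex may sleep), which leaves a window in
 * which a concurrent CLOSE can unhash the stateid.  In that case
 * nfsd4_lock_ol_stateid() sees a closed sc_type, the reference is
 * dropped, and the lookup is simply repeated.
 */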
4303 
4304 static struct nfs4_openowner *
4305 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4306 			   struct nfsd4_compound_state *cstate)
4307 {
4308 	struct nfs4_client *clp = cstate->clp;
4309 	struct nfs4_openowner *oo, *ret;
4310 
4311 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4312 	if (!oo)
4313 		return NULL;
4314 	oo->oo_owner.so_ops = &openowner_ops;
4315 	oo->oo_owner.so_is_open_owner = 1;
4316 	oo->oo_owner.so_seqid = open->op_seqid;
4317 	oo->oo_flags = 0;
4318 	if (nfsd4_has_session(cstate))
4319 		oo->oo_flags |= NFS4_OO_CONFIRMED;
4320 	oo->oo_time = 0;
4321 	oo->oo_last_closed_stid = NULL;
4322 	INIT_LIST_HEAD(&oo->oo_close_lru);
4323 	spin_lock(&clp->cl_lock);
4324 	ret = find_openstateowner_str_locked(strhashval, open, clp);
4325 	if (ret == NULL) {
4326 		hash_openowner(oo, clp, strhashval);
4327 		ret = oo;
4328 	} else
4329 		nfs4_free_stateowner(&oo->oo_owner);
4330 
4331 	spin_unlock(&clp->cl_lock);
4332 	return ret;
4333 }
4334 
4335 static struct nfs4_ol_stateid *
4336 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4337 {
4339 	struct nfs4_openowner *oo = open->op_openowner;
4340 	struct nfs4_ol_stateid *retstp = NULL;
4341 	struct nfs4_ol_stateid *stp;
4342 
4343 	stp = open->op_stp;
4344 	/* Init and lock st_mutex before taking the spinlocks below; a mutex may sleep and must not be acquired under a spin lock */
4345 	mutex_init(&stp->st_mutex);
4346 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4347 
4348 retry:
4349 	spin_lock(&oo->oo_owner.so_client->cl_lock);
4350 	spin_lock(&fp->fi_lock);
4351 
4352 	retstp = nfsd4_find_existing_open(fp, open);
4353 	if (retstp)
4354 		goto out_unlock;
4355 
4356 	open->op_stp = NULL;
4357 	refcount_inc(&stp->st_stid.sc_count);
4358 	stp->st_stid.sc_type = NFS4_OPEN_STID;
4359 	INIT_LIST_HEAD(&stp->st_locks);
4360 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4361 	get_nfs4_file(fp);
4362 	stp->st_stid.sc_file = fp;
4363 	stp->st_access_bmap = 0;
4364 	stp->st_deny_bmap = 0;
4365 	stp->st_openstp = NULL;
4366 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4367 	list_add(&stp->st_perfile, &fp->fi_stateids);
4368 
4369 out_unlock:
4370 	spin_unlock(&fp->fi_lock);
4371 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
4372 	if (retstp) {
4373 		/* Handle races with CLOSE */
4374 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4375 			nfs4_put_stid(&retstp->st_stid);
4376 			goto retry;
4377 		}
4378 		/* Drop the mutex we pre-locked on our own stp to keep mutex tracking balanced */
4379 		mutex_unlock(&stp->st_mutex);
4380 		stp = retstp;
4381 	}
4382 	return stp;
4383 }
4384 
4385 /*
4386  * In the 4.0 case we need to keep the owners around a little while to handle
4387  * CLOSE replay. We still need to release any file access that they hold
4388  * before returning, however.
4389  */
4390 static void
4391 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4392 {
4393 	struct nfs4_ol_stateid *last;
4394 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
4395 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4396 						nfsd_net_id);
4397 
4398 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4399 
4400 	/*
4401 	 * We know that we hold one reference via nfsd4_close, and another
4402 	 * "persistent" reference for the client. If the refcount is higher
4403 	 * than 2, then there are still calls in progress that are using this
4404 	 * stateid. We can't put the sc_file reference until they are finished.
4405 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
4406 	 * there should be no danger of the refcount going back up again at
4407 	 * this point.
4408 	 */
4409 	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4410 
4411 	release_all_access(s);
4412 	if (s->st_stid.sc_file) {
4413 		put_nfs4_file(s->st_stid.sc_file);
4414 		s->st_stid.sc_file = NULL;
4415 	}
4416 
4417 	spin_lock(&nn->client_lock);
4418 	last = oo->oo_last_closed_stid;
4419 	oo->oo_last_closed_stid = s;
4420 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4421 	oo->oo_time = ktime_get_boottime_seconds();
4422 	spin_unlock(&nn->client_lock);
4423 	if (last)
4424 		nfs4_put_stid(&last->st_stid);
4425 }
4426 
4427 /* search file_hashtbl[] for file */
4428 static struct nfs4_file *
4429 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
4430 {
4431 	struct nfs4_file *fp;
4432 
4433 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4434 				lockdep_is_held(&state_lock)) {
4435 		if (fh_match(&fp->fi_fhandle, fh)) {
4436 			if (refcount_inc_not_zero(&fp->fi_ref))
4437 				return fp;
4438 		}
4439 	}
4440 	return NULL;
4441 }
4442 
4443 struct nfs4_file *
4444 find_file(struct knfsd_fh *fh)
4445 {
4446 	struct nfs4_file *fp;
4447 	unsigned int hashval = file_hashval(fh);
4448 
4449 	rcu_read_lock();
4450 	fp = find_file_locked(fh, hashval);
4451 	rcu_read_unlock();
4452 	return fp;
4453 }
4454 
4455 static struct nfs4_file *
4456 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
4457 {
4458 	struct nfs4_file *fp;
4459 	unsigned int hashval = file_hashval(fh);
4460 
4461 	rcu_read_lock();
4462 	fp = find_file_locked(fh, hashval);
4463 	rcu_read_unlock();
4464 	if (fp)
4465 		return fp;
4466 
4467 	spin_lock(&state_lock);
4468 	fp = find_file_locked(fh, hashval);
4469 	if (likely(fp == NULL)) {
4470 		nfsd4_init_file(fh, hashval, new);
4471 		fp = new;
4472 	}
4473 	spin_unlock(&state_lock);
4474 
4475 	return fp;
4476 }
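
/*
 * find_or_add_file() is the usual lockless-lookup-then-recheck pattern;
 * a minimal sketch of the shape (hypothetical helper names):
 *
 *	obj = rcu_lookup(key);			// fast path, no lock
 *	if (!obj) {
 *		spin_lock(&table_lock);
 *		obj = locked_lookup(key);	// recheck under the lock
 *		if (!obj)
 *			obj = insert(preallocated);
 *		spin_unlock(&table_lock);
 *	}
 *
 * The second lookup is what prevents two racing callers from each
 * inserting their preallocated nfs4_file for the same filehandle.
 */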
4477 
4478 /*
4479  * Called to check deny when READ with all zero stateid or
4480  * WRITE with all zero or all one stateid
4481  */
4482 static __be32
4483 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4484 {
4485 	struct nfs4_file *fp;
4486 	__be32 ret = nfs_ok;
4487 
4488 	fp = find_file(&current_fh->fh_handle);
4489 	if (!fp)
4490 		return ret;
4491 	/* Check for conflicting share reservations */
4492 	spin_lock(&fp->fi_lock);
4493 	if (fp->fi_share_deny & deny_type)
4494 		ret = nfserr_locked;
4495 	spin_unlock(&fp->fi_lock);
4496 	put_nfs4_file(fp);
4497 	return ret;
4498 }
4499 
4500 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4501 {
4502 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4503 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4504 					  nfsd_net_id);
4505 
4506 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4507 
4508 	/*
4509 	 * We can't do this in nfsd_break_deleg_cb because it is
4510 	 * already holding inode->i_lock.
4511 	 *
4512 	 * If the dl_time != 0, then we know that it has already been
4513 	 * queued for a lease break. Don't queue it again.
4514 	 */
4515 	spin_lock(&state_lock);
4516 	if (dp->dl_time == 0) {
4517 		dp->dl_time = ktime_get_boottime_seconds();
4518 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4519 	}
4520 	spin_unlock(&state_lock);
4521 }
4522 
4523 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4524 		struct rpc_task *task)
4525 {
4526 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4527 
4528 	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4529 	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4530 		return 1;
4531 
4532 	switch (task->tk_status) {
4533 	case 0:
4534 		return 1;
4535 	case -NFS4ERR_DELAY:
4536 		rpc_delay(task, 2 * HZ);
4537 		return 0;
4538 	case -EBADHANDLE:
4539 	case -NFS4ERR_BAD_STATEID:
4540 		/*
4541 		 * Race: client probably got cb_recall before open reply
4542 		 * granting delegation.
4543 		 */
4544 		if (dp->dl_retries--) {
4545 			rpc_delay(task, 2 * HZ);
4546 			return 0;
4547 		}
4548 		fallthrough;
4549 	default:
4550 		return 1;
4551 	}
4552 }
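
/*
 * The return convention used by the nfsd4 callback machinery for ->done
 * above: returning 1 means the callback is finished (successfully or
 * not) and may be released; returning 0 after an rpc_delay() asks the
 * RPC layer to retry the call later.  Hence NFS4ERR_DELAY retries
 * indefinitely, while the bad-handle/bad-stateid race is retried only
 * dl_retries times before giving up.
 */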
4553 
4554 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4555 {
4556 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4557 
4558 	nfs4_put_stid(&dp->dl_stid);
4559 }
4560 
4561 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4562 	.prepare	= nfsd4_cb_recall_prepare,
4563 	.done		= nfsd4_cb_recall_done,
4564 	.release	= nfsd4_cb_recall_release,
4565 };
4566 
4567 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4568 {
4569 	/*
4570 	 * We're assuming the state code never drops its reference
4571 	 * without first removing the lease.  Since we're in this lease
4572 	 * callback (and since the lease code is serialized by the
4573 	 * i_lock) we know the server hasn't removed the lease yet, and
4574 	 * we know it's safe to take a reference.
4575 	 */
4576 	refcount_inc(&dp->dl_stid.sc_count);
4577 	nfsd4_run_cb(&dp->dl_recall);
4578 }
4579 
4580 /* Called from break_lease() with i_lock held. */
4581 static bool
4582 nfsd_break_deleg_cb(struct file_lock *fl)
4583 {
4584 	bool ret = false;
4585 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4586 	struct nfs4_file *fp = dp->dl_stid.sc_file;
4587 
4588 	trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
4589 
4590 	/*
4591 	 * We don't want the locks code to timeout the lease for us;
4592 	 * we'll remove it ourselves if a delegation isn't returned
4593 	 * in time:
4594 	 */
4595 	fl->fl_break_time = 0;
4596 
4597 	spin_lock(&fp->fi_lock);
4598 	fp->fi_had_conflict = true;
4599 	nfsd_break_one_deleg(dp);
4600 	spin_unlock(&fp->fi_lock);
4601 	return ret;
4602 }
4603 
4604 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4605 {
4606 	struct nfs4_delegation *dl = fl->fl_owner;
4607 	struct svc_rqst *rqst;
4608 	struct nfs4_client *clp;
4609 
4610 	if (!i_am_nfsd())
4611 		return false;
4612 	rqst = kthread_data(current);
4613 	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4614 	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4615 		return false;
4616 	clp = *(rqst->rq_lease_breaker);
4617 	return dl->dl_stid.sc_client == clp;
4618 }
4619 
4620 static int
4621 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4622 		     struct list_head *dispose)
4623 {
4624 	if (arg & F_UNLCK)
4625 		return lease_modify(onlist, arg, dispose);
4626 	else
4627 		return -EAGAIN;
4628 }
4629 
4630 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4631 	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4632 	.lm_break = nfsd_break_deleg_cb,
4633 	.lm_change = nfsd_change_deleg_cb,
4634 };
4635 
4636 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4637 {
4638 	if (nfsd4_has_session(cstate))
4639 		return nfs_ok;
4640 	if (seqid == so->so_seqid - 1)
4641 		return nfserr_replay_me;
4642 	if (seqid == so->so_seqid)
4643 		return nfs_ok;
4644 	return nfserr_bad_seqid;
4645 }
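
/*
 * Example of the NFSv4.0 seqid window this enforces: if an owner's
 * so_seqid is currently 5, then a request carrying seqid 5 is the next
 * expected operation, seqid 4 is a retransmission of the previous one
 * (answered from the owner's replay cache via nfserr_replay_me), and
 * any other value is nfserr_bad_seqid.  Sessions (4.1+) make all of
 * this unnecessary, hence the early nfs_ok.
 */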
4646 
4647 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
4648 						struct nfsd_net *nn)
4649 {
4650 	struct nfs4_client *found;
4651 
4652 	spin_lock(&nn->client_lock);
4653 	found = find_confirmed_client(clid, sessions, nn);
4654 	if (found)
4655 		atomic_inc(&found->cl_rpc_users);
4656 	spin_unlock(&nn->client_lock);
4657 	return found;
4658 }
4659 
4660 static __be32 set_client(clientid_t *clid,
4661 		struct nfsd4_compound_state *cstate,
4662 		struct nfsd_net *nn)
4663 {
4664 	if (cstate->clp) {
4665 		if (!same_clid(&cstate->clp->cl_clientid, clid))
4666 			return nfserr_stale_clientid;
4667 		return nfs_ok;
4668 	}
4669 	if (STALE_CLIENTID(clid, nn))
4670 		return nfserr_stale_clientid;
4671 	/*
4672 	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
4673 	 * set cstate->clp), so session = false:
4674 	 */
4675 	cstate->clp = lookup_clientid(clid, false, nn);
4676 	if (!cstate->clp)
4677 		return nfserr_expired;
4678 	return nfs_ok;
4679 }
4680 
4681 __be32
4682 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4683 		    struct nfsd4_open *open, struct nfsd_net *nn)
4684 {
4685 	clientid_t *clientid = &open->op_clientid;
4686 	struct nfs4_client *clp = NULL;
4687 	unsigned int strhashval;
4688 	struct nfs4_openowner *oo = NULL;
4689 	__be32 status;
4690 
4691 	/*
4692 	 * In case we need it later, after we've already created the
4693 	 * file and don't want to risk a further failure:
4694 	 */
4695 	open->op_file = nfsd4_alloc_file();
4696 	if (open->op_file == NULL)
4697 		return nfserr_jukebox;
4698 
4699 	status = set_client(clientid, cstate, nn);
4700 	if (status)
4701 		return status;
4702 	clp = cstate->clp;
4703 
4704 	strhashval = ownerstr_hashval(&open->op_owner);
4705 	oo = find_openstateowner_str(strhashval, open, clp);
4706 	open->op_openowner = oo;
4707 	if (!oo)
4708 		goto new_owner;
4710 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4711 		/* Replace unconfirmed owners without checking for replay. */
4712 		release_openowner(oo);
4713 		open->op_openowner = NULL;
4714 		goto new_owner;
4715 	}
4716 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4717 	if (status)
4718 		return status;
4719 	goto alloc_stateid;
4720 new_owner:
4721 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
4722 	if (oo == NULL)
4723 		return nfserr_jukebox;
4724 	open->op_openowner = oo;
4725 alloc_stateid:
4726 	open->op_stp = nfs4_alloc_open_stateid(clp);
4727 	if (!open->op_stp)
4728 		return nfserr_jukebox;
4729 
4730 	if (nfsd4_has_session(cstate) &&
4731 	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4732 		open->op_odstate = alloc_clnt_odstate(clp);
4733 		if (!open->op_odstate)
4734 			return nfserr_jukebox;
4735 	}
4736 
4737 	return nfs_ok;
4738 }
4739 
4740 static inline __be32
4741 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4742 {
4743 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4744 		return nfserr_openmode;
4745 	else
4746 		return nfs_ok;
4747 }
4748 
4749 static int share_access_to_flags(u32 share_access)
4750 {
4751 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4752 }
4753 
4754 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4755 {
4756 	struct nfs4_stid *ret;
4757 
4758 	ret = find_stateid_by_type(cl, s,
4759 				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4760 	if (!ret)
4761 		return NULL;
4762 	return delegstateid(ret);
4763 }
4764 
4765 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4766 {
4767 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4768 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4769 }
4770 
4771 static __be32
4772 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4773 		struct nfs4_delegation **dp)
4774 {
4775 	int flags;
4776 	__be32 status = nfserr_bad_stateid;
4777 	struct nfs4_delegation *deleg;
4778 
4779 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4780 	if (deleg == NULL)
4781 		goto out;
4782 	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4783 		nfs4_put_stid(&deleg->dl_stid);
4784 		if (cl->cl_minorversion)
4785 			status = nfserr_deleg_revoked;
4786 		goto out;
4787 	}
4788 	flags = share_access_to_flags(open->op_share_access);
4789 	status = nfs4_check_delegmode(deleg, flags);
4790 	if (status) {
4791 		nfs4_put_stid(&deleg->dl_stid);
4792 		goto out;
4793 	}
4794 	*dp = deleg;
4795 out:
4796 	if (!nfsd4_is_deleg_cur(open))
4797 		return nfs_ok;
4798 	if (status)
4799 		return status;
4800 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4801 	return nfs_ok;
4802 }
4803 
4804 static inline int nfs4_access_to_access(u32 nfs4_access)
4805 {
4806 	int flags = 0;
4807 
4808 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4809 		flags |= NFSD_MAY_READ;
4810 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4811 		flags |= NFSD_MAY_WRITE;
4812 	return flags;
4813 }
4814 
4815 static inline __be32
4816 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4817 		struct nfsd4_open *open)
4818 {
4819 	struct iattr iattr = {
4820 		.ia_valid = ATTR_SIZE,
4821 		.ia_size = 0,
4822 	};
4823 	if (!open->op_truncate)
4824 		return 0;
4825 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4826 		return nfserr_inval;
4827 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
4828 }
4829 
4830 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4831 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4832 		struct nfsd4_open *open)
4833 {
4834 	struct nfsd_file *nf = NULL;
4835 	__be32 status;
4836 	int oflag = nfs4_access_to_omode(open->op_share_access);
4837 	int access = nfs4_access_to_access(open->op_share_access);
4838 	unsigned char old_access_bmap, old_deny_bmap;
4839 
4840 	spin_lock(&fp->fi_lock);
4841 
4842 	/*
4843 	 * Are we trying to set a deny mode that would conflict with
4844 	 * current access?
4845 	 */
4846 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4847 	if (status != nfs_ok) {
4848 		spin_unlock(&fp->fi_lock);
4849 		goto out;
4850 	}
4851 
4852 	/* set access to the file */
4853 	status = nfs4_file_get_access(fp, open->op_share_access);
4854 	if (status != nfs_ok) {
4855 		spin_unlock(&fp->fi_lock);
4856 		goto out;
4857 	}
4858 
4859 	/* Set access bits in stateid */
4860 	old_access_bmap = stp->st_access_bmap;
4861 	set_access(open->op_share_access, stp);
4862 
4863 	/* Set new deny mask */
4864 	old_deny_bmap = stp->st_deny_bmap;
4865 	set_deny(open->op_share_deny, stp);
4866 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4867 
4868 	if (!fp->fi_fds[oflag]) {
4869 		spin_unlock(&fp->fi_lock);
4870 		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4871 		if (status)
4872 			goto out_put_access;
4873 		spin_lock(&fp->fi_lock);
4874 		if (!fp->fi_fds[oflag]) {
4875 			fp->fi_fds[oflag] = nf;
4876 			nf = NULL;
4877 		}
4878 	}
4879 	spin_unlock(&fp->fi_lock);
4880 	if (nf)
4881 		nfsd_file_put(nf);
4882 
4883 	status = nfsd4_truncate(rqstp, cur_fh, open);
4884 	if (status)
4885 		goto out_put_access;
4886 out:
4887 	return status;
4888 out_put_access:
4889 	stp->st_access_bmap = old_access_bmap;
4890 	nfs4_file_put_access(fp, open->op_share_access);
4891 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4892 	goto out;
4893 }
4894 
4895 static __be32
4896 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4897 {
4898 	__be32 status;
4899 	unsigned char old_deny_bmap = stp->st_deny_bmap;
4900 
4901 	if (!test_access(open->op_share_access, stp))
4902 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4903 
4904 	/* test and set deny mode */
4905 	spin_lock(&fp->fi_lock);
4906 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4907 	if (status == nfs_ok) {
4908 		set_deny(open->op_share_deny, stp);
4909 		fp->fi_share_deny |=
4910 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4911 	}
4912 	spin_unlock(&fp->fi_lock);
4913 
4914 	if (status != nfs_ok)
4915 		return status;
4916 
4917 	status = nfsd4_truncate(rqstp, cur_fh, open);
4918 	if (status != nfs_ok)
4919 		reset_union_bmap_deny(old_deny_bmap, stp);
4920 	return status;
4921 }
4922 
4923 /* Should we give out recallable state?: */
4924 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4925 {
4926 	if (clp->cl_cb_state == NFSD4_CB_UP)
4927 		return true;
4928 	/*
4929 	 * In the sessions case, since we don't have to establish a
4930 	 * separate connection for callbacks, we assume it's OK
4931 	 * until we hear otherwise:
4932 	 */
4933 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4934 }
4935 
4936 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4937 						int flag)
4938 {
4939 	struct file_lock *fl;
4940 
4941 	fl = locks_alloc_lock();
4942 	if (!fl)
4943 		return NULL;
4944 	fl->fl_lmops = &nfsd_lease_mng_ops;
4945 	fl->fl_flags = FL_DELEG;
4946 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4947 	fl->fl_end = OFFSET_MAX;
4948 	fl->fl_owner = (fl_owner_t)dp;
4949 	fl->fl_pid = current->tgid;
4950 	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
4951 	return fl;
4952 }
4953 
4954 static struct nfs4_delegation *
4955 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4956 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4957 {
4958 	int status = 0;
4959 	struct nfs4_delegation *dp;
4960 	struct nfsd_file *nf;
4961 	struct file_lock *fl;
4962 
4963 	/*
4964 	 * The fi_had_conflict and nfs4_delegation_exists checks
4965 	 * here are just optimizations; we'll need to recheck them at
4966 	 * the end:
4967 	 */
4968 	if (fp->fi_had_conflict)
4969 		return ERR_PTR(-EAGAIN);
4970 
4971 	nf = find_readable_file(fp);
4972 	if (!nf) {
4973 		/* We should always have a readable file here */
4974 		WARN_ON_ONCE(1);
4975 		return ERR_PTR(-EBADF);
4976 	}
4977 	spin_lock(&state_lock);
4978 	spin_lock(&fp->fi_lock);
4979 	if (nfs4_delegation_exists(clp, fp))
4980 		status = -EAGAIN;
4981 	else if (!fp->fi_deleg_file) {
4982 		fp->fi_deleg_file = nf;
4983 		/* increment early to prevent fi_deleg_file from being
4984 		 * cleared */
4985 		fp->fi_delegees = 1;
4986 		nf = NULL;
4987 	} else
4988 		fp->fi_delegees++;
4989 	spin_unlock(&fp->fi_lock);
4990 	spin_unlock(&state_lock);
4991 	if (nf)
4992 		nfsd_file_put(nf);
4993 	if (status)
4994 		return ERR_PTR(status);
4995 
4996 	status = -ENOMEM;
4997 	dp = alloc_init_deleg(clp, fp, fh, odstate);
4998 	if (!dp)
4999 		goto out_delegees;
5000 
5001 	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5002 	if (!fl)
5003 		goto out_clnt_odstate;
5004 
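	/*
	 * vfs_setlease() may consume the lock: on success the lease
	 * subsystem typically takes ownership of fl and clears our
	 * pointer, but it can also install an existing lease and hand
	 * fl back.  Freeing it only when still non-NULL covers both
	 * cases without leaking or double-freeing.
	 */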
5005 	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5006 	if (fl)
5007 		locks_free_lock(fl);
5008 	if (status)
5009 		goto out_clnt_odstate;
5010 
5011 	spin_lock(&state_lock);
5012 	spin_lock(&fp->fi_lock);
5013 	if (fp->fi_had_conflict)
5014 		status = -EAGAIN;
5015 	else
5016 		status = hash_delegation_locked(dp, fp);
5017 	spin_unlock(&fp->fi_lock);
5018 	spin_unlock(&state_lock);
5019 
5020 	if (status)
5021 		goto out_unlock;
5022 
5023 	return dp;
5024 out_unlock:
5025 	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5026 out_clnt_odstate:
5027 	put_clnt_odstate(dp->dl_clnt_odstate);
5028 	nfs4_put_stid(&dp->dl_stid);
5029 out_delegees:
5030 	put_deleg_file(fp);
5031 	return ERR_PTR(status);
5032 }
5033 
5034 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5035 {
5036 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5037 	if (status == -EAGAIN)
5038 		open->op_why_no_deleg = WND4_CONTENTION;
5039 	else {
5040 		open->op_why_no_deleg = WND4_RESOURCE;
5041 		switch (open->op_deleg_want) {
5042 		case NFS4_SHARE_WANT_READ_DELEG:
5043 		case NFS4_SHARE_WANT_WRITE_DELEG:
5044 		case NFS4_SHARE_WANT_ANY_DELEG:
5045 			break;
5046 		case NFS4_SHARE_WANT_CANCEL:
5047 			open->op_why_no_deleg = WND4_CANCELLED;
5048 			break;
5049 		case NFS4_SHARE_WANT_NO_DELEG:
5050 			WARN_ON_ONCE(1);
5051 		}
5052 	}
5053 }
5054 
5055 /*
5056  * Attempt to hand out a delegation.
5057  *
5058  * Note we don't support write delegations, and won't until the vfs has
5059  * proper support for them.
5060  */
5061 static void
5062 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
5063 			struct nfs4_ol_stateid *stp)
5064 {
5065 	struct nfs4_delegation *dp;
5066 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5067 	struct nfs4_client *clp = stp->st_stid.sc_client;
5068 	int cb_up;
5069 	int status = 0;
5070 
5071 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5072 	open->op_recall = 0;
5073 	switch (open->op_claim_type) {
5074 		case NFS4_OPEN_CLAIM_PREVIOUS:
5075 			if (!cb_up)
5076 				open->op_recall = 1;
5077 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5078 				goto out_no_deleg;
5079 			break;
5080 		case NFS4_OPEN_CLAIM_NULL:
5081 		case NFS4_OPEN_CLAIM_FH:
5082 			/*
5083 			 * Let's not give out any delegations till everyone's
5084 			 * had the chance to reclaim theirs, *and* until
5085 			 * NLM locks have all been reclaimed:
5086 			 */
5087 			if (locks_in_grace(clp->net))
5088 				goto out_no_deleg;
5089 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5090 				goto out_no_deleg;
5091 			/*
5092 			 * Also, if the file was opened for write or
5093 			 * create, there's a good chance the client's
5094 			 * about to write to it, resulting in an
5095 			 * immediate recall (since we don't support
5096 			 * write delegations):
5097 			 */
5098 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
5099 				goto out_no_deleg;
5100 			if (open->op_create == NFS4_OPEN_CREATE)
5101 				goto out_no_deleg;
5102 			break;
5103 		default:
5104 			goto out_no_deleg;
5105 	}
5106 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5107 	if (IS_ERR(dp))
5108 		goto out_no_deleg;
5109 
5110 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5111 
5112 	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5113 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5114 	nfs4_put_stid(&dp->dl_stid);
5115 	return;
5116 out_no_deleg:
5117 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5118 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5119 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5120 		open->op_recall = 1;
5121 	}
5122 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5123 
5124 	/* 4.1 client asking for a delegation? */
5125 	if (open->op_deleg_want)
5126 		nfsd4_open_deleg_none_ext(open, status);
5127 	return;
5128 }
5129 
5130 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5131 					struct nfs4_delegation *dp)
5132 {
5133 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5134 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5135 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5136 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5137 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5138 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5139 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5140 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5141 	}
5142 	/* Otherwise the client must be confused wanting a delegation
5143 	 * it already has, so we don't return
5144 	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason code.
5145 	 */
5146 }
5147 
5148 __be32
5149 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5150 {
5151 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
5152 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5153 	struct nfs4_file *fp = NULL;
5154 	struct nfs4_ol_stateid *stp = NULL;
5155 	struct nfs4_delegation *dp = NULL;
5156 	__be32 status;
5157 	bool new_stp = false;
5158 
5159 	/*
5160 	 * Lookup file; if found, lookup stateid and check open request,
5161 	 * and check for delegations in the process of being recalled.
5162 	 * If not found, create the nfs4_file struct.
5163 	 */
5164 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
5165 	if (fp != open->op_file) {
5166 		status = nfs4_check_deleg(cl, open, &dp);
5167 		if (status)
5168 			goto out;
5169 		stp = nfsd4_find_and_lock_existing_open(fp, open);
5170 	} else {
5171 		open->op_file = NULL;
5172 		status = nfserr_bad_stateid;
5173 		if (nfsd4_is_deleg_cur(open))
5174 			goto out;
5175 	}
5176 
5177 	if (!stp) {
5178 		stp = init_open_stateid(fp, open);
5179 		if (!open->op_stp)
5180 			new_stp = true;
5181 	}
5182 
5183 	/*
5184 	 * OPEN the file, or upgrade an existing OPEN.
5185 	 * If truncate fails, the OPEN fails.
5186 	 *
5187 	 * stp is already locked.
5188 	 */
5189 	if (!new_stp) {
5190 		/* Stateid was found, this is an OPEN upgrade */
5191 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5192 		if (status) {
5193 			mutex_unlock(&stp->st_mutex);
5194 			goto out;
5195 		}
5196 	} else {
5197 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5198 		if (status) {
5199 			stp->st_stid.sc_type = NFS4_CLOSED_STID;
5200 			release_open_stateid(stp);
5201 			mutex_unlock(&stp->st_mutex);
5202 			goto out;
5203 		}
5204 
5205 		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5206 							open->op_odstate);
5207 		if (stp->st_clnt_odstate == open->op_odstate)
5208 			open->op_odstate = NULL;
5209 	}
5210 
5211 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5212 	mutex_unlock(&stp->st_mutex);
5213 
5214 	if (nfsd4_has_session(&resp->cstate)) {
5215 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5216 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5217 			open->op_why_no_deleg = WND4_NOT_WANTED;
5218 			goto nodeleg;
5219 		}
5220 	}
5221 
5222 	/*
5223 	 * Attempt to hand out a delegation. No error return, because the
5224 	 * OPEN succeeds even if we fail.
5225 	 */
5226 	nfs4_open_delegation(current_fh, open, stp);
5227 nodeleg:
5228 	status = nfs_ok;
5229 	trace_nfsd_open(&stp->st_stid.sc_stateid);
5230 out:
5231 	/* 4.1 client trying to upgrade/downgrade delegation? */
5232 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5233 	    open->op_deleg_want)
5234 		nfsd4_deleg_xgrade_none_ext(open, dp);
5235 
5236 	if (fp)
5237 		put_nfs4_file(fp);
5238 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5239 		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5240 	/*
5241 	 * To finish the open response, we just need to set the rflags.
5242 	 */
5243 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5244 	if (nfsd4_has_session(&resp->cstate))
5245 		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5246 	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5247 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5248 
5249 	if (dp)
5250 		nfs4_put_stid(&dp->dl_stid);
5251 	if (stp)
5252 		nfs4_put_stid(&stp->st_stid);
5253 
5254 	return status;
5255 }
5256 
5257 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5258 			      struct nfsd4_open *open)
5259 {
5260 	if (open->op_openowner) {
5261 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5262 
5263 		nfsd4_cstate_assign_replay(cstate, so);
5264 		nfs4_put_stateowner(so);
5265 	}
5266 	if (open->op_file)
5267 		kmem_cache_free(file_slab, open->op_file);
5268 	if (open->op_stp)
5269 		nfs4_put_stid(&open->op_stp->st_stid);
5270 	if (open->op_odstate)
5271 		kmem_cache_free(odstate_slab, open->op_odstate);
5272 }
5273 
5274 __be32
5275 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5276 	    union nfsd4_op_u *u)
5277 {
5278 	clientid_t *clid = &u->renew;
5279 	struct nfs4_client *clp;
5280 	__be32 status;
5281 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5282 
5283 	trace_nfsd_clid_renew(clid);
5284 	status = set_client(clid, cstate, nn);
5285 	if (status)
5286 		return status;
5287 	clp = cstate->clp;
5288 	if (!list_empty(&clp->cl_delegations)
5289 			&& clp->cl_cb_state != NFSD4_CB_UP)
5290 		return nfserr_cb_path_down;
5291 	return nfs_ok;
5292 }
5293 
5294 void
5295 nfsd4_end_grace(struct nfsd_net *nn)
5296 {
5297 	/* do nothing if grace period already ended */
5298 	if (nn->grace_ended)
5299 		return;
5300 
5301 	trace_nfsd_grace_complete(nn);
5302 	nn->grace_ended = true;
5303 	/*
5304 	 * If the server goes down again right now, an NFSv4
5305 	 * client will still be allowed to reclaim after it comes back up,
5306 	 * even if it hasn't yet had a chance to reclaim state this time.
5307 	 */
5309 	nfsd4_record_grace_done(nn);
5310 	/*
5311 	 * At this point, NFSv4 clients can still reclaim.  But if the
5312 	 * server crashes, any that have not yet reclaimed will be out
5313 	 * of luck on the next boot.
5314 	 *
5315 	 * (NFSv4.1+ clients are considered to have reclaimed once they
5316 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
5317 	 * have reclaimed after their first OPEN.)
5318 	 */
5319 	locks_end_grace(&nn->nfsd4_manager);
5320 	/*
5321 	 * At this point, and once lockd and/or any other containers
5322 	 * exit their grace period, further reclaims will fail and
5323 	 * regular locking can resume.
5324 	 */
5325 }
5326 
5327 /*
5328  * If we've waited a lease period but there are still clients trying to
5329  * reclaim, wait a little longer to give them a chance to finish.
5330  */
5331 static bool clients_still_reclaiming(struct nfsd_net *nn)
5332 {
5333 	time64_t double_grace_period_end = nn->boot_time +
5334 					   2 * nn->nfsd4_lease;
5335 
5336 	if (nn->track_reclaim_completes &&
5337 			atomic_read(&nn->nr_reclaim_complete) ==
5338 			nn->reclaim_str_hashtbl_size)
5339 		return false;
5340 	if (!nn->somebody_reclaimed)
5341 		return false;
5342 	nn->somebody_reclaimed = false;
5343 	/*
5344 	 * If we've given them *two* lease times to reclaim, and they're
5345 	 * still not done, give up:
5346 	 */
5347 	if (ktime_get_boottime_seconds() > double_grace_period_end)
5348 		return false;
5349 	return true;
5350 }
5351 
5352 struct laundry_time {
5353 	time64_t cutoff;
5354 	time64_t new_timeo;
5355 };
5356 
5357 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5358 {
5359 	time64_t time_remaining;
5360 
5361 	if (last_refresh < lt->cutoff)
5362 		return true;
5363 	time_remaining = last_refresh - lt->cutoff;
5364 	lt->new_timeo = min(lt->new_timeo, time_remaining);
5365 	return false;
5366 }
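
/*
 * A quick numeric example of how this drives the laundromat: with a
 * 90-second lease and a current boottime of 1000s, lt->cutoff is 910.
 * A client last renewed at 950 has not expired; its remaining time is
 * 40s, so lt->new_timeo shrinks to (at most) 40 and nfs4_laundromat()
 * will be rescheduled in time to reap it.
 */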
5367 
5368 static time64_t
5369 nfs4_laundromat(struct nfsd_net *nn)
5370 {
5371 	struct nfs4_client *clp;
5372 	struct nfs4_openowner *oo;
5373 	struct nfs4_delegation *dp;
5374 	struct nfs4_ol_stateid *stp;
5375 	struct nfsd4_blocked_lock *nbl;
5376 	struct list_head *pos, *next, reaplist;
5377 	struct laundry_time lt = {
5378 		.cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
5379 		.new_timeo = nn->nfsd4_lease
5380 	};
5381 	struct nfs4_cpntf_state *cps;
5382 	copy_stateid_t *cps_t;
5383 	int i;
5384 
5385 	if (clients_still_reclaiming(nn)) {
5386 		lt.new_timeo = 0;
5387 		goto out;
5388 	}
5389 	nfsd4_end_grace(nn);
5390 	INIT_LIST_HEAD(&reaplist);
5391 
5392 	spin_lock(&nn->s2s_cp_lock);
5393 	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5394 		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5395 		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5396 				state_expired(&lt, cps->cpntf_time))
5397 			_free_cpntf_state_locked(nn, cps);
5398 	}
5399 	spin_unlock(&nn->s2s_cp_lock);
5400 
5401 	spin_lock(&nn->client_lock);
5402 	list_for_each_safe(pos, next, &nn->client_lru) {
5403 		clp = list_entry(pos, struct nfs4_client, cl_lru);
5404 		if (!state_expired(&lt, clp->cl_time))
5405 			break;
5406 		if (mark_client_expired_locked(clp)) {
5407 			trace_nfsd_clid_expired(&clp->cl_clientid);
5408 			continue;
5409 		}
5410 		list_add(&clp->cl_lru, &reaplist);
5411 	}
5412 	spin_unlock(&nn->client_lock);
5413 	list_for_each_safe(pos, next, &reaplist) {
5414 		clp = list_entry(pos, struct nfs4_client, cl_lru);
5415 		trace_nfsd_clid_purged(&clp->cl_clientid);
5416 		list_del_init(&clp->cl_lru);
5417 		expire_client(clp);
5418 	}
5419 	spin_lock(&state_lock);
5420 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
5421 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
5422 		if (!state_expired(&lt, dp->dl_time))
5423 			break;
5424 		WARN_ON(!unhash_delegation_locked(dp));
5425 		list_add(&dp->dl_recall_lru, &reaplist);
5426 	}
5427 	spin_unlock(&state_lock);
5428 	while (!list_empty(&reaplist)) {
5429 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
5430 					dl_recall_lru);
5431 		list_del_init(&dp->dl_recall_lru);
5432 		revoke_delegation(dp);
5433 	}
5434 
5435 	spin_lock(&nn->client_lock);
5436 	while (!list_empty(&nn->close_lru)) {
5437 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5438 					oo_close_lru);
5439 		if (!state_expired(&lt, oo->oo_time))
5440 			break;
5441 		list_del_init(&oo->oo_close_lru);
5442 		stp = oo->oo_last_closed_stid;
5443 		oo->oo_last_closed_stid = NULL;
5444 		spin_unlock(&nn->client_lock);
5445 		nfs4_put_stid(&stp->st_stid);
5446 		spin_lock(&nn->client_lock);
5447 	}
5448 	spin_unlock(&nn->client_lock);
5449 
5450 	/*
5451 	 * It's possible for a client to try to acquire an already held lock
5452 	 * that is being held for a long time, and then lose interest in it.
5453 	 * So, we clean out any un-revisited request after a lease period
5454 	 * under the assumption that the client is no longer interested.
5455 	 *
5456 	 * RFC5661, sec. 9.6 states that the client must not rely on getting
5457 	 * notifications and must continue to poll for locks, even when the
5458 	 * server supports them. Thus this shouldn't lead to clients blocking
5459 	 * indefinitely once the lock does become free.
5460 	 */
5461 	BUG_ON(!list_empty(&reaplist));
5462 	spin_lock(&nn->blocked_locks_lock);
5463 	while (!list_empty(&nn->blocked_locks_lru)) {
5464 		nbl = list_first_entry(&nn->blocked_locks_lru,
5465 					struct nfsd4_blocked_lock, nbl_lru);
5466 		if (!state_expired(&lt, nbl->nbl_time))
5467 			break;
5468 		list_move(&nbl->nbl_lru, &reaplist);
5469 		list_del_init(&nbl->nbl_list);
5470 	}
5471 	spin_unlock(&nn->blocked_locks_lock);
5472 
5473 	while (!list_empty(&reaplist)) {
5474 		nbl = list_first_entry(&reaplist,
5475 					struct nfsd4_blocked_lock, nbl_lru);
5476 		list_del_init(&nbl->nbl_lru);
5477 		free_blocked_lock(nbl);
5478 	}
5479 out:
5480 	return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5481 }
5482 
5483 static struct workqueue_struct *laundry_wq;
5484 static void laundromat_main(struct work_struct *);
5485 
5486 static void
5487 laundromat_main(struct work_struct *laundry)
5488 {
5489 	time64_t t;
5490 	struct delayed_work *dwork = to_delayed_work(laundry);
5491 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5492 					   laundromat_work);
5493 
5494 	t = nfs4_laundromat(nn);
5495 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t * HZ);
5496 }
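
/*
 * The laundromat is a self-rearming delayed work item: nfs4_laundromat()
 * returns the number of seconds until the next sweep is due (never less
 * than NFSD_LAUNDROMAT_MINTIMEOUT), and laundromat_main() converts that to
 * jiffies and requeues itself.  The general pattern, sketched here with
 * hypothetical names (do_scan, my_wq):
 *
 *	static void rearming_main(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		time64_t secs = do_scan();
 *
 *		queue_delayed_work(my_wq, dwork, secs * HZ);
 *	}
 */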
5497 
5498 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5499 {
5500 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5501 		return nfserr_bad_stateid;
5502 	return nfs_ok;
5503 }
5504 
5505 static inline int
5506 access_permit_read(struct nfs4_ol_stateid *stp)
5507 {
5508 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
5509 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
5510 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
5511 }
5512 
5513 static inline int
5514 access_permit_write(struct nfs4_ol_stateid *stp)
5515 {
5516 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
5517 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
5518 }
5519 
5520 static
5521 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5522 {
5523 	__be32 status = nfserr_openmode;
5524 
5525 	/* For lock stateids, we test the parent open, not the lock: */
5526 	if (stp->st_openstp)
5527 		stp = stp->st_openstp;
5528 	if ((flags & WR_STATE) && !access_permit_write(stp))
5529 		goto out;
5530 	if ((flags & RD_STATE) && !access_permit_read(stp))
5531 		goto out;
5532 	status = nfs_ok;
5533 out:
5534 	return status;
5535 }
5536 
5537 static inline __be32
5538 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5539 {
5540 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
5541 		return nfs_ok;
5542 	else if (opens_in_grace(net)) {
5543 		/* Answer in remaining cases depends on existence of
5544 		 * conflicting state; so we must wait out the grace period. */
5545 		return nfserr_grace;
5546 	} else if (flags & WR_STATE)
5547 		return nfs4_share_conflict(current_fh,
5548 				NFS4_SHARE_DENY_WRITE);
5549 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5550 		return nfs4_share_conflict(current_fh,
5551 				NFS4_SHARE_DENY_READ);
5552 }
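
/*
 * Summary of check_special_stateids(), which only ever sees the all-zeros
 * ("anonymous") and all-ones ("READ bypass") stateids:
 *
 *	one stateid + RD_STATE		-> always nfs_ok
 *	anything else during grace	-> nfserr_grace (the answer depends
 *					   on state that may yet be reclaimed)
 *	WR_STATE			-> fails if an existing open on the
 *					   file denies WRITE
 *	RD_STATE + zero stateid		-> fails if an existing open on the
 *					   file denies READ
 */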
5553 
5554 /*
5555  * Allow READ/WRITE during grace period on recovered state only for files
5556  * that are not able to provide mandatory locking.
5557  */
5558 static inline int
5559 grace_disallows_io(struct net *net, struct inode *inode)
5560 {
5561 	return opens_in_grace(net) && mandatory_lock(inode);
5562 }
5563 
5564 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5565 {
5566 	/*
5567 	 * When sessions are used the stateid generation number is ignored
5568 	 * when it is zero.
5569 	 */
5570 	if (has_session && in->si_generation == 0)
5571 		return nfs_ok;
5572 
5573 	if (in->si_generation == ref->si_generation)
5574 		return nfs_ok;
5575 
5576 	/* If the client sends us a stateid from the future, it's buggy: */
5577 	if (nfsd4_stateid_generation_after(in, ref))
5578 		return nfserr_bad_stateid;
5579 	/*
5580 	 * However, we could see a stateid from the past, even from a
5581 	 * non-buggy client.  For example, if the client sends a lock
5582 	 * while some IO is outstanding, the lock may bump si_generation
5583 	 * while the IO is still in flight.  The client could avoid that
5584 	 * situation by waiting for responses on all the IO requests,
5585 	 * but better performance may result in retrying IO that
5586 	 * but better performance may result from retrying IO that
5587 	 * reordered in flight:
5588 	 */
5589 	return nfserr_old_stateid;
5590 }
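
/*
 * Worked examples for the generation check above, writing just the
 * si_generation values (in = client-supplied stateid, ref = the server's
 * copy):
 *
 *	in == 0, session in use	-> nfs_ok ("use the most recent" stateid)
 *	in == 3, ref == 3	-> nfs_ok
 *	in == 5, ref == 3	-> nfserr_bad_stateid (from the future)
 *	in == 2, ref == 3	-> nfserr_old_stateid (stale, but possibly
 *				   just reordered in flight)
 */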
5591 
5592 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5593 {
5594 	__be32 ret;
5595 
5596 	spin_lock(&s->sc_lock);
5597 	ret = nfsd4_verify_open_stid(s);
5598 	if (ret == nfs_ok)
5599 		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5600 	spin_unlock(&s->sc_lock);
5601 	return ret;
5602 }
5603 
5604 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5605 {
5606 	if (ols->st_stateowner->so_is_open_owner &&
5607 	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5608 		return nfserr_bad_stateid;
5609 	return nfs_ok;
5610 }
5611 
5612 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5613 {
5614 	struct nfs4_stid *s;
5615 	__be32 status = nfserr_bad_stateid;
5616 
5617 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5618 		CLOSE_STATEID(stateid))
5619 		return status;
5620 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
5621 		return status;
5622 	spin_lock(&cl->cl_lock);
5623 	s = find_stateid_locked(cl, stateid);
5624 	if (!s)
5625 		goto out_unlock;
5626 	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5627 	if (status)
5628 		goto out_unlock;
5629 	switch (s->sc_type) {
5630 	case NFS4_DELEG_STID:
5631 		status = nfs_ok;
5632 		break;
5633 	case NFS4_REVOKED_DELEG_STID:
5634 		status = nfserr_deleg_revoked;
5635 		break;
5636 	case NFS4_OPEN_STID:
5637 	case NFS4_LOCK_STID:
5638 		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5639 		break;
5640 	default:
5641 		printk(KERN_WARNING "unknown stateid type %x\n", s->sc_type);
5642 		fallthrough;
5643 	case NFS4_CLOSED_STID:
5644 	case NFS4_CLOSED_DELEG_STID:
5645 		status = nfserr_bad_stateid;
5646 	}
5647 out_unlock:
5648 	spin_unlock(&cl->cl_lock);
5649 	return status;
5650 }
5651 
5652 __be32
5653 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5654 		     stateid_t *stateid, unsigned char typemask,
5655 		     struct nfs4_stid **s, struct nfsd_net *nn)
5656 {
5657 	__be32 status;
5658 	bool return_revoked = false;
5659 
5660 	/*
5661 	 * Only return revoked delegations if explicitly asked;
5662 	 * otherwise we report revoked or bad_stateid status.
5663 	 */
5664 	if (typemask & NFS4_REVOKED_DELEG_STID)
5665 		return_revoked = true;
5666 	else if (typemask & NFS4_DELEG_STID)
5667 		typemask |= NFS4_REVOKED_DELEG_STID;
5668 
5669 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5670 		CLOSE_STATEID(stateid))
5671 		return nfserr_bad_stateid;
5672 	status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
5673 	if (status == nfserr_stale_clientid) {
5674 		if (cstate->session)
5675 			return nfserr_bad_stateid;
5676 		return nfserr_stale_stateid;
5677 	}
5678 	if (status)
5679 		return status;
5680 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
5681 	if (!*s)
5682 		return nfserr_bad_stateid;
5683 	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5684 		nfs4_put_stid(*s);
5685 		if (cstate->minorversion)
5686 			return nfserr_deleg_revoked;
5687 		return nfserr_bad_stateid;
5688 	}
5689 	return nfs_ok;
5690 }
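
/*
 * Typical calling pattern, sketched from the callers later in this file:
 * pass a mask of acceptable stateid types and, on success, receive a
 * referenced stid that must eventually be released:
 *
 *	struct nfs4_stid *s;
 *	__be32 status;
 *
 *	status = nfsd4_lookup_stateid(cstate, stateid,
 *			NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
 *			&s, nn);
 *	if (status)
 *		return status;
 *	... use s ...
 *	nfs4_put_stid(s);
 */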
5691 
5692 static struct nfsd_file *
5693 nfs4_find_file(struct nfs4_stid *s, int flags)
5694 {
5695 	if (!s)
5696 		return NULL;
5697 
5698 	switch (s->sc_type) {
5699 	case NFS4_DELEG_STID:
5700 		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5701 			return NULL;
5702 		return nfsd_file_get(s->sc_file->fi_deleg_file);
5703 	case NFS4_OPEN_STID:
5704 	case NFS4_LOCK_STID:
5705 		if (flags & RD_STATE)
5706 			return find_readable_file(s->sc_file);
5707 		else
5708 			return find_writeable_file(s->sc_file);
5709 	}
5710 
5711 	return NULL;
5712 }
5713 
5714 static __be32
5715 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5716 {
5717 	__be32 status;
5718 
5719 	status = nfsd4_check_openowner_confirmed(ols);
5720 	if (status)
5721 		return status;
5722 	return nfs4_check_openmode(ols, flags);
5723 }
5724 
5725 static __be32
5726 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5727 		struct nfsd_file **nfp, int flags)
5728 {
5729 	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5730 	struct nfsd_file *nf;
5731 	__be32 status;
5732 
5733 	nf = nfs4_find_file(s, flags);
5734 	if (nf) {
5735 		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5736 				acc | NFSD_MAY_OWNER_OVERRIDE);
5737 		if (status) {
5738 			nfsd_file_put(nf);
5739 			goto out;
5740 		}
5741 	} else {
5742 		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5743 		if (status)
5744 			return status;
5745 	}
5746 	*nfp = nf;
5747 out:
5748 	return status;
5749 }
5750 static void
5751 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5752 {
5753 	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
5754 	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
5755 		return;
5756 	list_del(&cps->cp_list);
5757 	idr_remove(&nn->s2s_cp_stateids,
5758 		   cps->cp_stateid.stid.si_opaque.so_id);
5759 	kfree(cps);
5760 }
5761 /*
5762  * A READ from an inter-server-to-server COPY will have a
5763  * copy stateid. Look up the copy notify stateid from the
5764  * idr structure and take a reference on it.
5765  */
5766 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5767 			  struct nfs4_client *clp,
5768 			  struct nfs4_cpntf_state **cps)
5769 {
5770 	copy_stateid_t *cps_t;
5771 	struct nfs4_cpntf_state *state = NULL;
5772 
5773 	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
5774 		return nfserr_bad_stateid;
5775 	spin_lock(&nn->s2s_cp_lock);
5776 	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
5777 	if (cps_t) {
5778 		state = container_of(cps_t, struct nfs4_cpntf_state,
5779 				     cp_stateid);
5780 		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
5781 			state = NULL;
5782 			goto unlock;
5783 		}
5784 		if (!clp)
5785 			refcount_inc(&state->cp_stateid.sc_count);
5786 		else
5787 			_free_cpntf_state_locked(nn, state);
5788 	}
5789 unlock:
5790 	spin_unlock(&nn->s2s_cp_lock);
5791 	if (!state)
5792 		return nfserr_bad_stateid;
5793 	if (!clp && state)
5794 		*cps = state;
5795 	return 0;
5796 }
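
/*
 * manage_cpntf_state() is used two ways: with clp == NULL it looks the
 * copy-notify stateid up and returns it in *cps with an extra reference
 * taken; with clp != NULL it instead releases the state.  A sketch of the
 * lookup side (see find_cpntf_state() below):
 *
 *	struct nfs4_cpntf_state *cps = NULL;
 *
 *	if (manage_cpntf_state(nn, st, NULL, &cps) == 0) {
 *		... perform the READ on behalf of the copy ...
 *		nfs4_put_cpntf_state(nn, cps);
 *	}
 */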
5797 
5798 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5799 			       struct nfs4_stid **stid)
5800 {
5801 	__be32 status;
5802 	struct nfs4_cpntf_state *cps = NULL;
5803 	struct nfs4_client *found;
5804 
5805 	status = manage_cpntf_state(nn, st, NULL, &cps);
5806 	if (status)
5807 		return status;
5808 
5809 	cps->cpntf_time = ktime_get_boottime_seconds();
5810 
5811 	status = nfserr_expired;
5812 	found = lookup_clientid(&cps->cp_p_clid, true, nn);
5813 	if (!found)
5814 		goto out;
5815 
5816 	*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
5817 			NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
5818 	if (*stid)
5819 		status = nfs_ok;
5820 	else
5821 		status = nfserr_bad_stateid;
5822 
5823 	put_client_renew(found);
5824 out:
5825 	nfs4_put_cpntf_state(nn, cps);
5826 	return status;
5827 }
5828 
5829 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5830 {
5831 	spin_lock(&nn->s2s_cp_lock);
5832 	_free_cpntf_state_locked(nn, cps);
5833 	spin_unlock(&nn->s2s_cp_lock);
5834 }
5835 
5836 /*
5837  * Checks for stateid operations
5838  */
5839 __be32
5840 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5841 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5842 		stateid_t *stateid, int flags, struct nfsd_file **nfp,
5843 		struct nfs4_stid **cstid)
5844 {
5845 	struct inode *ino = d_inode(fhp->fh_dentry);
5846 	struct net *net = SVC_NET(rqstp);
5847 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5848 	struct nfs4_stid *s = NULL;
5849 	__be32 status;
5850 
5851 	if (nfp)
5852 		*nfp = NULL;
5853 
5854 	if (grace_disallows_io(net, ino))
5855 		return nfserr_grace;
5856 
5857 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5858 		status = check_special_stateids(net, fhp, stateid, flags);
5859 		goto done;
5860 	}
5861 
5862 	status = nfsd4_lookup_stateid(cstate, stateid,
5863 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5864 				&s, nn);
5865 	if (status == nfserr_bad_stateid)
5866 		status = find_cpntf_state(nn, stateid, &s);
5867 	if (status)
5868 		return status;
5869 	status = nfsd4_stid_check_stateid_generation(stateid, s,
5870 			nfsd4_has_session(cstate));
5871 	if (status)
5872 		goto out;
5873 
5874 	switch (s->sc_type) {
5875 	case NFS4_DELEG_STID:
5876 		status = nfs4_check_delegmode(delegstateid(s), flags);
5877 		break;
5878 	case NFS4_OPEN_STID:
5879 	case NFS4_LOCK_STID:
5880 		status = nfs4_check_olstateid(openlockstateid(s), flags);
5881 		break;
5882 	default:
5883 		status = nfserr_bad_stateid;
5884 		break;
5885 	}
5886 	if (status)
5887 		goto out;
5888 	status = nfs4_check_fh(fhp, s);
5889 
5890 done:
5891 	if (status == nfs_ok && nfp)
5892 		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
5893 out:
5894 	if (s) {
5895 		if (!status && cstid)
5896 			*cstid = s;
5897 		else
5898 			nfs4_put_stid(s);
5899 	}
5900 	return status;
5901 }
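
/*
 * This is the entry point used by the I/O operations.  A READ handler,
 * for example, checks its stateid roughly like this (a sketch; the
 * surrounding handler code is elided):
 *
 *	status = nfs4_preprocess_stateid_op(rqstp, cstate,
 *				&cstate->current_fh, &read->rd_stateid,
 *				RD_STATE, &read->rd_nf, NULL);
 *
 * On success *nfp holds a referenced struct nfsd_file opened for the
 * requested access, and *cstid, when the caller supplies one, holds a
 * referenced struct nfs4_stid.
 */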
5902 
5903 /*
5904  * Test if the stateid is valid
5905  */
5906 __be32
5907 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5908 		   union nfsd4_op_u *u)
5909 {
5910 	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5911 	struct nfsd4_test_stateid_id *stateid;
5912 	struct nfs4_client *cl = cstate->clp;
5913 
5914 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5915 		stateid->ts_id_status =
5916 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5917 
5918 	return nfs_ok;
5919 }
5920 
5921 static __be32
5922 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5923 {
5924 	struct nfs4_ol_stateid *stp = openlockstateid(s);
5925 	__be32 ret;
5926 
5927 	ret = nfsd4_lock_ol_stateid(stp);
5928 	if (ret)
5929 		goto out_put_stid;
5930 
5931 	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5932 	if (ret)
5933 		goto out;
5934 
5935 	ret = nfserr_locks_held;
5936 	if (check_for_locks(stp->st_stid.sc_file,
5937 			    lockowner(stp->st_stateowner)))
5938 		goto out;
5939 
5940 	release_lock_stateid(stp);
5941 	ret = nfs_ok;
5942 
5943 out:
5944 	mutex_unlock(&stp->st_mutex);
5945 out_put_stid:
5946 	nfs4_put_stid(s);
5947 	return ret;
5948 }
5949 
5950 __be32
5951 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5952 		   union nfsd4_op_u *u)
5953 {
5954 	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5955 	stateid_t *stateid = &free_stateid->fr_stateid;
5956 	struct nfs4_stid *s;
5957 	struct nfs4_delegation *dp;
5958 	struct nfs4_client *cl = cstate->clp;
5959 	__be32 ret = nfserr_bad_stateid;
5960 
5961 	spin_lock(&cl->cl_lock);
5962 	s = find_stateid_locked(cl, stateid);
5963 	if (!s)
5964 		goto out_unlock;
5965 	spin_lock(&s->sc_lock);
5966 	switch (s->sc_type) {
5967 	case NFS4_DELEG_STID:
5968 		ret = nfserr_locks_held;
5969 		break;
5970 	case NFS4_OPEN_STID:
5971 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5972 		if (ret)
5973 			break;
5974 		ret = nfserr_locks_held;
5975 		break;
5976 	case NFS4_LOCK_STID:
5977 		spin_unlock(&s->sc_lock);
5978 		refcount_inc(&s->sc_count);
5979 		spin_unlock(&cl->cl_lock);
5980 		ret = nfsd4_free_lock_stateid(stateid, s);
5981 		goto out;
5982 	case NFS4_REVOKED_DELEG_STID:
5983 		spin_unlock(&s->sc_lock);
5984 		dp = delegstateid(s);
5985 		list_del_init(&dp->dl_recall_lru);
5986 		spin_unlock(&cl->cl_lock);
5987 		nfs4_put_stid(s);
5988 		ret = nfs_ok;
5989 		goto out;
5990 	/* Default falls through and returns nfserr_bad_stateid */
5991 	}
5992 	spin_unlock(&s->sc_lock);
5993 out_unlock:
5994 	spin_unlock(&cl->cl_lock);
5995 out:
5996 	return ret;
5997 }
5998 
5999 static inline int
6000 setlkflg (int type)
6001 {
6002 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6003 		RD_STATE : WR_STATE;
6004 }
6005 
6006 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6007 {
6008 	struct svc_fh *current_fh = &cstate->current_fh;
6009 	struct nfs4_stateowner *sop = stp->st_stateowner;
6010 	__be32 status;
6011 
6012 	status = nfsd4_check_seqid(cstate, sop, seqid);
6013 	if (status)
6014 		return status;
6015 	status = nfsd4_lock_ol_stateid(stp);
6016 	if (status != nfs_ok)
6017 		return status;
6018 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6019 	if (status == nfs_ok)
6020 		status = nfs4_check_fh(current_fh, &stp->st_stid);
6021 	if (status != nfs_ok)
6022 		mutex_unlock(&stp->st_mutex);
6023 	return status;
6024 }
6025 
6026 /*
6027  * Checks for sequence id mutating operations.
6028  */
6029 static __be32
6030 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6031 			 stateid_t *stateid, char typemask,
6032 			 struct nfs4_ol_stateid **stpp,
6033 			 struct nfsd_net *nn)
6034 {
6035 	__be32 status;
6036 	struct nfs4_stid *s;
6037 	struct nfs4_ol_stateid *stp = NULL;
6038 
6039 	trace_nfsd_preprocess(seqid, stateid);
6040 
6041 	*stpp = NULL;
6042 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6043 	if (status)
6044 		return status;
6045 	stp = openlockstateid(s);
6046 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6047 
6048 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6049 	if (!status)
6050 		*stpp = stp;
6051 	else
6052 		nfs4_put_stid(&stp->st_stid);
6053 	return status;
6054 }
6055 
6056 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6057 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6058 {
6059 	__be32 status;
6060 	struct nfs4_openowner *oo;
6061 	struct nfs4_ol_stateid *stp;
6062 
6063 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6064 						NFS4_OPEN_STID, &stp, nn);
6065 	if (status)
6066 		return status;
6067 	oo = openowner(stp->st_stateowner);
6068 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6069 		mutex_unlock(&stp->st_mutex);
6070 		nfs4_put_stid(&stp->st_stid);
6071 		return nfserr_bad_stateid;
6072 	}
6073 	*stpp = stp;
6074 	return nfs_ok;
6075 }
6076 
6077 __be32
6078 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6079 		   union nfsd4_op_u *u)
6080 {
6081 	struct nfsd4_open_confirm *oc = &u->open_confirm;
6082 	__be32 status;
6083 	struct nfs4_openowner *oo;
6084 	struct nfs4_ol_stateid *stp;
6085 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6086 
6087 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6088 			cstate->current_fh.fh_dentry);
6089 
6090 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6091 	if (status)
6092 		return status;
6093 
6094 	status = nfs4_preprocess_seqid_op(cstate,
6095 					oc->oc_seqid, &oc->oc_req_stateid,
6096 					NFS4_OPEN_STID, &stp, nn);
6097 	if (status)
6098 		goto out;
6099 	oo = openowner(stp->st_stateowner);
6100 	status = nfserr_bad_stateid;
6101 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6102 		mutex_unlock(&stp->st_mutex);
6103 		goto put_stateid;
6104 	}
6105 	oo->oo_flags |= NFS4_OO_CONFIRMED;
6106 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6107 	mutex_unlock(&stp->st_mutex);
6108 	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6109 	nfsd4_client_record_create(oo->oo_owner.so_client);
6110 	status = nfs_ok;
6111 put_stateid:
6112 	nfs4_put_stid(&stp->st_stid);
6113 out:
6114 	nfsd4_bump_seqid(cstate, status);
6115 	return status;
6116 }
6117 
6118 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6119 {
6120 	if (!test_access(access, stp))
6121 		return;
6122 	nfs4_file_put_access(stp->st_stid.sc_file, access);
6123 	clear_access(access, stp);
6124 }
6125 
6126 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6127 {
6128 	switch (to_access) {
6129 	case NFS4_SHARE_ACCESS_READ:
6130 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6131 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6132 		break;
6133 	case NFS4_SHARE_ACCESS_WRITE:
6134 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6135 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6136 		break;
6137 	case NFS4_SHARE_ACCESS_BOTH:
6138 		break;
6139 	default:
6140 		WARN_ON_ONCE(1);
6141 	}
6142 }
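
/*
 * Example: a stateid whose access bitmap currently covers READ, WRITE and
 * BOTH being downgraded to NFS4_SHARE_ACCESS_READ has its WRITE and BOTH
 * access references put and those bits cleared, leaving only READ.
 * "Downgrading" to BOTH is a no-op, and any other target access is a
 * caller bug (hence the WARN_ON_ONCE).
 */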
6143 
6144 __be32
6145 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6146 		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6147 {
6148 	struct nfsd4_open_downgrade *od = &u->open_downgrade;
6149 	__be32 status;
6150 	struct nfs4_ol_stateid *stp;
6151 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6152 
6153 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6154 			cstate->current_fh.fh_dentry);
6155 
6156 	/* We don't yet support WANT bits: */
6157 	if (od->od_deleg_want)
6158 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6159 			od->od_deleg_want);
6160 
6161 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6162 					&od->od_stateid, &stp, nn);
6163 	if (status)
6164 		goto out;
6165 	status = nfserr_inval;
6166 	if (!test_access(od->od_share_access, stp)) {
6167 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6168 			stp->st_access_bmap, od->od_share_access);
6169 		goto put_stateid;
6170 	}
6171 	if (!test_deny(od->od_share_deny, stp)) {
6172 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6173 			stp->st_deny_bmap, od->od_share_deny);
6174 		goto put_stateid;
6175 	}
6176 	nfs4_stateid_downgrade(stp, od->od_share_access);
6177 	reset_union_bmap_deny(od->od_share_deny, stp);
6178 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6179 	status = nfs_ok;
6180 put_stateid:
6181 	mutex_unlock(&stp->st_mutex);
6182 	nfs4_put_stid(&stp->st_stid);
6183 out:
6184 	nfsd4_bump_seqid(cstate, status);
6185 	return status;
6186 }
6187 
6188 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6189 {
6190 	struct nfs4_client *clp = s->st_stid.sc_client;
6191 	bool unhashed;
6192 	LIST_HEAD(reaplist);
6193 
6194 	spin_lock(&clp->cl_lock);
6195 	unhashed = unhash_open_stateid(s, &reaplist);
6196 
6197 	if (clp->cl_minorversion) {
6198 		if (unhashed)
6199 			put_ol_stateid_locked(s, &reaplist);
6200 		spin_unlock(&clp->cl_lock);
6201 		free_ol_stateid_reaplist(&reaplist);
6202 	} else {
6203 		spin_unlock(&clp->cl_lock);
6204 		free_ol_stateid_reaplist(&reaplist);
6205 		if (unhashed)
6206 			move_to_close_lru(s, clp->net);
6207 	}
6208 }
6209 
6210 /*
6211  * nfs4_unlock_state() called after encode
6212  */
6213 __be32
6214 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6215 		union nfsd4_op_u *u)
6216 {
6217 	struct nfsd4_close *close = &u->close;
6218 	__be32 status;
6219 	struct nfs4_ol_stateid *stp;
6220 	struct net *net = SVC_NET(rqstp);
6221 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6222 
6223 	dprintk("NFSD: nfsd4_close on file %pd\n",
6224 			cstate->current_fh.fh_dentry);
6225 
6226 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6227 					&close->cl_stateid,
6228 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
6229 					&stp, nn);
6230 	nfsd4_bump_seqid(cstate, status);
6231 	if (status)
6232 		goto out;
6233 
6234 	stp->st_stid.sc_type = NFS4_CLOSED_STID;
6235 
6236 	/*
6237 	 * Technically we don't _really_ have to increment or copy it, since
6238 	 * it should just be gone after this operation and we clobber the
6239 	 * copied value below, but we continue to do so here just to ensure
6240 	 * that racing ops see that there was a state change.
6241 	 */
6242 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6243 
6244 	nfsd4_close_open_stateid(stp);
6245 	mutex_unlock(&stp->st_mutex);
6246 
6247 	/* v4.1+ suggests that we send a special stateid in here, since the
6248 	 * clients should just ignore this anyway. Since this is not useful
6249 	 * for v4.0 clients either, we set it to the special close_stateid
6250 	 * universally.
6251 	 *
6252 	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6253 	 */
6254 	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6255 
6256 	/* put reference from nfs4_preprocess_seqid_op */
6257 	nfs4_put_stid(&stp->st_stid);
6258 out:
6259 	return status;
6260 }
6261 
6262 __be32
6263 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6264 		  union nfsd4_op_u *u)
6265 {
6266 	struct nfsd4_delegreturn *dr = &u->delegreturn;
6267 	struct nfs4_delegation *dp;
6268 	stateid_t *stateid = &dr->dr_stateid;
6269 	struct nfs4_stid *s;
6270 	__be32 status;
6271 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6272 
6273 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6274 		return status;
6275 
6276 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6277 	if (status)
6278 		goto out;
6279 	dp = delegstateid(s);
6280 	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6281 	if (status)
6282 		goto put_stateid;
6283 
6284 	destroy_delegation(dp);
6285 put_stateid:
6286 	nfs4_put_stid(&dp->dl_stid);
6287 out:
6288 	return status;
6289 }
6290 
6291 static inline u64
6292 end_offset(u64 start, u64 len)
6293 {
6294 	u64 end;
6295 
6296 	end = start + len;
6297 	return end >= start ? end : NFS4_MAX_UINT64;
6298 }
6299 
6300 /* last octet in a range */
6301 static inline u64
6302 last_byte_offset(u64 start, u64 len)
6303 {
6304 	u64 end;
6305 
6306 	WARN_ON_ONCE(!len);
6307 	end = start + len;
6308 	return end > start ? end - 1 : NFS4_MAX_UINT64;
6309 }
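
/*
 * Worked examples for the two range helpers above:
 *
 *	end_offset(100, 10)		== 110	(first byte past the range)
 *	last_byte_offset(100, 10)	== 109	(last byte in the range)
 *
 * Both saturate to NFS4_MAX_UINT64 when start + len wraps around 2^64,
 * e.g. last_byte_offset(0xfffffffffffffff0, 0x100) == NFS4_MAX_UINT64.
 */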
6310 
6311 /*
6312  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6313  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6314  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
6315  * locking, this prevents us from being completely protocol-compliant.  The
6316  * real solution to this problem is to start using unsigned file offsets in
6317  * the VFS, but this is a very deep change!
6318  */
6319 static inline void
6320 nfs4_transform_lock_offset(struct file_lock *lock)
6321 {
6322 	if (lock->fl_start < 0)
6323 		lock->fl_start = OFFSET_MAX;
6324 	if (lock->fl_end < 0)
6325 		lock->fl_end = OFFSET_MAX;
6326 }
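
/*
 * Example: a full-file lock (offset 0, length NFS4_MAX_UINT64) produces an
 * fl_end in the top half of the u64 range, which is negative when read as
 * the VFS's signed loff_t; the transform above clamps such values to
 * OFFSET_MAX, so the VFS sees a lock running to the largest representable
 * offset.
 */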
6327 
6328 static fl_owner_t
6329 nfsd4_fl_get_owner(fl_owner_t owner)
6330 {
6331 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6332 
6333 	nfs4_get_stateowner(&lo->lo_owner);
6334 	return owner;
6335 }
6336 
6337 static void
6338 nfsd4_fl_put_owner(fl_owner_t owner)
6339 {
6340 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6341 
6342 	if (lo)
6343 		nfs4_put_stateowner(&lo->lo_owner);
6344 }
6345 
6346 static void
6347 nfsd4_lm_notify(struct file_lock *fl)
6348 {
6349 	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
6350 	struct net			*net = lo->lo_owner.so_client->net;
6351 	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
6352 	struct nfsd4_blocked_lock	*nbl = container_of(fl,
6353 						struct nfsd4_blocked_lock, nbl_lock);
6354 	bool queue = false;
6355 
6356 	/* An empty list means that something else is going to be using it */
6357 	spin_lock(&nn->blocked_locks_lock);
6358 	if (!list_empty(&nbl->nbl_list)) {
6359 		list_del_init(&nbl->nbl_list);
6360 		list_del_init(&nbl->nbl_lru);
6361 		queue = true;
6362 	}
6363 	spin_unlock(&nn->blocked_locks_lock);
6364 
6365 	if (queue)
6366 		nfsd4_run_cb(&nbl->nbl_cb);
6367 }
6368 
6369 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
6370 	.lm_notify = nfsd4_lm_notify,
6371 	.lm_get_owner = nfsd4_fl_get_owner,
6372 	.lm_put_owner = nfsd4_fl_put_owner,
6373 };
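
/*
 * Life cycle of a blocked lock, as wired up above: when a conflicting lock
 * is released, the file locking core calls ->lm_notify(), which takes the
 * nfsd4_blocked_lock off the lockowner's list and the per-net LRU and
 * queues its callback (CB_NOTIFY_LOCK) so the client knows to retry.  If
 * the entry is already off the lists (reaped by the laundromat, or being
 * torn down by nfsd4_lock() itself), lm_notify() does nothing: whoever
 * emptied the list owns the block.
 */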
6374 
6375 static inline void
6376 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6377 {
6378 	struct nfs4_lockowner *lo;
6379 
6380 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6381 		lo = (struct nfs4_lockowner *) fl->fl_owner;
6382 		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6383 						GFP_KERNEL);
6384 		if (!deny->ld_owner.data)
6385 			/* We just don't care that much */
6386 			goto nevermind;
6387 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6388 	} else {
6389 nevermind:
6390 		deny->ld_owner.len = 0;
6391 		deny->ld_owner.data = NULL;
6392 		deny->ld_clientid.cl_boot = 0;
6393 		deny->ld_clientid.cl_id = 0;
6394 	}
6395 	deny->ld_start = fl->fl_start;
6396 	deny->ld_length = NFS4_MAX_UINT64;
6397 	if (fl->fl_end != NFS4_MAX_UINT64)
6398 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
6399 	deny->ld_type = NFS4_READ_LT;
6400 	if (fl->fl_type != F_RDLCK)
6401 		deny->ld_type = NFS4_WRITE_LT;
6402 }
6403 
6404 static struct nfs4_lockowner *
6405 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6406 {
6407 	unsigned int strhashval = ownerstr_hashval(owner);
6408 	struct nfs4_stateowner *so;
6409 
6410 	lockdep_assert_held(&clp->cl_lock);
6411 
6412 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6413 			    so_strhash) {
6414 		if (so->so_is_open_owner)
6415 			continue;
6416 		if (same_owner_str(so, owner))
6417 			return lockowner(nfs4_get_stateowner(so));
6418 	}
6419 	return NULL;
6420 }
6421 
6422 static struct nfs4_lockowner *
6423 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6424 {
6425 	struct nfs4_lockowner *lo;
6426 
6427 	spin_lock(&clp->cl_lock);
6428 	lo = find_lockowner_str_locked(clp, owner);
6429 	spin_unlock(&clp->cl_lock);
6430 	return lo;
6431 }
6432 
6433 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6434 {
6435 	unhash_lockowner_locked(lockowner(sop));
6436 }
6437 
6438 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6439 {
6440 	struct nfs4_lockowner *lo = lockowner(sop);
6441 
6442 	kmem_cache_free(lockowner_slab, lo);
6443 }
6444 
6445 static const struct nfs4_stateowner_operations lockowner_ops = {
6446 	.so_unhash =	nfs4_unhash_lockowner,
6447 	.so_free =	nfs4_free_lockowner,
6448 };
6449 
6450 /*
6451  * Alloc a lock owner structure.
6452  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
6453  * occurred.
6454  *
6455  * strhashval = ownerstr_hashval
6456  */
6457 static struct nfs4_lockowner *
6458 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6459 			   struct nfs4_ol_stateid *open_stp,
6460 			   struct nfsd4_lock *lock)
6461 {
6462 	struct nfs4_lockowner *lo, *ret;
6463 
6464 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6465 	if (!lo)
6466 		return NULL;
6467 	INIT_LIST_HEAD(&lo->lo_blocked);
6468 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6469 	lo->lo_owner.so_is_open_owner = 0;
6470 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6471 	lo->lo_owner.so_ops = &lockowner_ops;
6472 	spin_lock(&clp->cl_lock);
6473 	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6474 	if (ret == NULL) {
6475 		list_add(&lo->lo_owner.so_strhash,
6476 			 &clp->cl_ownerstr_hashtbl[strhashval]);
6477 		ret = lo;
6478 	} else
6479 		nfs4_free_stateowner(&lo->lo_owner);
6480 
6481 	spin_unlock(&clp->cl_lock);
6482 	return ret;
6483 }
6484 
6485 static struct nfs4_ol_stateid *
6486 find_lock_stateid(const struct nfs4_lockowner *lo,
6487 		  const struct nfs4_ol_stateid *ost)
6488 {
6489 	struct nfs4_ol_stateid *lst;
6490 
6491 	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6492 
6493 	/* If ost is not hashed, ost->st_locks will not be valid */
6494 	if (!nfs4_ol_stateid_unhashed(ost))
6495 		list_for_each_entry(lst, &ost->st_locks, st_locks) {
6496 			if (lst->st_stateowner == &lo->lo_owner) {
6497 				refcount_inc(&lst->st_stid.sc_count);
6498 				return lst;
6499 			}
6500 		}
6501 	return NULL;
6502 }
6503 
6504 static struct nfs4_ol_stateid *
6505 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6506 		  struct nfs4_file *fp, struct inode *inode,
6507 		  struct nfs4_ol_stateid *open_stp)
6508 {
6509 	struct nfs4_client *clp = lo->lo_owner.so_client;
6510 	struct nfs4_ol_stateid *retstp;
6511 
6512 	mutex_init(&stp->st_mutex);
6513 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6514 retry:
6515 	spin_lock(&clp->cl_lock);
6516 	if (nfs4_ol_stateid_unhashed(open_stp))
6517 		goto out_close;
6518 	retstp = find_lock_stateid(lo, open_stp);
6519 	if (retstp)
6520 		goto out_found;
6521 	refcount_inc(&stp->st_stid.sc_count);
6522 	stp->st_stid.sc_type = NFS4_LOCK_STID;
6523 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6524 	get_nfs4_file(fp);
6525 	stp->st_stid.sc_file = fp;
6526 	stp->st_access_bmap = 0;
6527 	stp->st_deny_bmap = open_stp->st_deny_bmap;
6528 	stp->st_openstp = open_stp;
6529 	spin_lock(&fp->fi_lock);
6530 	list_add(&stp->st_locks, &open_stp->st_locks);
6531 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6532 	list_add(&stp->st_perfile, &fp->fi_stateids);
6533 	spin_unlock(&fp->fi_lock);
6534 	spin_unlock(&clp->cl_lock);
6535 	return stp;
6536 out_found:
6537 	spin_unlock(&clp->cl_lock);
6538 	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6539 		nfs4_put_stid(&retstp->st_stid);
6540 		goto retry;
6541 	}
6542 	/* To keep mutex tracking happy */
6543 	mutex_unlock(&stp->st_mutex);
6544 	return retstp;
6545 out_close:
6546 	spin_unlock(&clp->cl_lock);
6547 	mutex_unlock(&stp->st_mutex);
6548 	return NULL;
6549 }
6550 
6551 static struct nfs4_ol_stateid *
6552 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6553 			    struct inode *inode, struct nfs4_ol_stateid *ost,
6554 			    bool *new)
6555 {
6556 	struct nfs4_stid *ns = NULL;
6557 	struct nfs4_ol_stateid *lst;
6558 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6559 	struct nfs4_client *clp = oo->oo_owner.so_client;
6560 
6561 	*new = false;
6562 	spin_lock(&clp->cl_lock);
6563 	lst = find_lock_stateid(lo, ost);
6564 	spin_unlock(&clp->cl_lock);
6565 	if (lst != NULL) {
6566 		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6567 			goto out;
6568 		nfs4_put_stid(&lst->st_stid);
6569 	}
6570 	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6571 	if (ns == NULL)
6572 		return NULL;
6573 
6574 	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6575 	if (lst == openlockstateid(ns))
6576 		*new = true;
6577 	else
6578 		nfs4_put_stid(ns);
6579 out:
6580 	return lst;
6581 }
6582 
6583 static int
6584 check_lock_length(u64 offset, u64 length)
6585 {
6586 	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6587 		(length > ~offset)));
6588 }
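
/*
 * check_lock_length() returns nonzero for invalid ranges: a zero length is
 * always bad, NFS4_MAX_UINT64 means "to the end of the file" and is always
 * fine, and otherwise "length > ~offset" rejects ranges that would wrap
 * past 2^64.  For example, offset == 0xffffffffffffff00 leaves ~offset ==
 * 0xff, so lengths up to 0xff are accepted while 0x100 (which would make
 * offset + length overflow) is rejected.
 */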
6589 
6590 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6591 {
6592 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6593 
6594 	lockdep_assert_held(&fp->fi_lock);
6595 
6596 	if (test_access(access, lock_stp))
6597 		return;
6598 	__nfs4_file_get_access(fp, access);
6599 	set_access(access, lock_stp);
6600 }
6601 
6602 static __be32
6603 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6604 			    struct nfs4_ol_stateid *ost,
6605 			    struct nfsd4_lock *lock,
6606 			    struct nfs4_ol_stateid **plst, bool *new)
6607 {
6608 	__be32 status;
6609 	struct nfs4_file *fi = ost->st_stid.sc_file;
6610 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6611 	struct nfs4_client *cl = oo->oo_owner.so_client;
6612 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6613 	struct nfs4_lockowner *lo;
6614 	struct nfs4_ol_stateid *lst;
6615 	unsigned int strhashval;
6616 
6617 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
6618 	if (!lo) {
6619 		strhashval = ownerstr_hashval(&lock->lk_new_owner);
6620 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6621 		if (lo == NULL)
6622 			return nfserr_jukebox;
6623 	} else {
6624 		/* with an existing lockowner, seqids must be the same */
6625 		status = nfserr_bad_seqid;
6626 		if (!cstate->minorversion &&
6627 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6628 			goto out;
6629 	}
6630 
6631 	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6632 	if (lst == NULL) {
6633 		status = nfserr_jukebox;
6634 		goto out;
6635 	}
6636 
6637 	status = nfs_ok;
6638 	*plst = lst;
6639 out:
6640 	nfs4_put_stateowner(&lo->lo_owner);
6641 	return status;
6642 }
6643 
6644 /*
6645  *  LOCK operation
6646  */
6647 __be32
6648 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6649 	   union nfsd4_op_u *u)
6650 {
6651 	struct nfsd4_lock *lock = &u->lock;
6652 	struct nfs4_openowner *open_sop = NULL;
6653 	struct nfs4_lockowner *lock_sop = NULL;
6654 	struct nfs4_ol_stateid *lock_stp = NULL;
6655 	struct nfs4_ol_stateid *open_stp = NULL;
6656 	struct nfs4_file *fp;
6657 	struct nfsd_file *nf = NULL;
6658 	struct nfsd4_blocked_lock *nbl = NULL;
6659 	struct file_lock *file_lock = NULL;
6660 	struct file_lock *conflock = NULL;
6661 	__be32 status = 0;
6662 	int lkflg;
6663 	int err;
6664 	bool new = false;
6665 	unsigned char fl_type;
6666 	unsigned int fl_flags = FL_POSIX;
6667 	struct net *net = SVC_NET(rqstp);
6668 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6669 
6670 	dprintk("NFSD: nfsd4_lock: start=%lld length=%lld\n",
6671 		(long long) lock->lk_offset,
6672 		(long long) lock->lk_length);
6673 
6674 	if (check_lock_length(lock->lk_offset, lock->lk_length))
6675 		return nfserr_inval;
6676 
6677 	if ((status = fh_verify(rqstp, &cstate->current_fh,
6678 				S_IFREG, NFSD_MAY_LOCK))) {
6679 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
6680 		return status;
6681 	}
6682 
6683 	if (lock->lk_is_new) {
6684 		if (nfsd4_has_session(cstate))
6685 			/* See rfc 5661 18.10.3: given clientid is ignored: */
6686 			memcpy(&lock->lk_new_clientid,
6687 				&cstate->clp->cl_clientid,
6688 				sizeof(clientid_t));
6689 
6690 		/* validate and update open stateid and open seqid */
6691 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
6692 				        lock->lk_new_open_seqid,
6693 		                        &lock->lk_new_open_stateid,
6694 					&open_stp, nn);
6695 		if (status)
6696 			goto out;
6697 		mutex_unlock(&open_stp->st_mutex);
6698 		open_sop = openowner(open_stp->st_stateowner);
6699 		status = nfserr_bad_stateid;
6700 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6701 						&lock->lk_new_clientid))
6702 			goto out;
6703 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
6704 							&lock_stp, &new);
6705 	} else {
6706 		status = nfs4_preprocess_seqid_op(cstate,
6707 				       lock->lk_old_lock_seqid,
6708 				       &lock->lk_old_lock_stateid,
6709 				       NFS4_LOCK_STID, &lock_stp, nn);
6710 	}
6711 	if (status)
6712 		goto out;
6713 	lock_sop = lockowner(lock_stp->st_stateowner);
6714 
6715 	lkflg = setlkflg(lock->lk_type);
6716 	status = nfs4_check_openmode(lock_stp, lkflg);
6717 	if (status)
6718 		goto out;
6719 
6720 	status = nfserr_grace;
6721 	if (locks_in_grace(net) && !lock->lk_reclaim)
6722 		goto out;
6723 	status = nfserr_no_grace;
6724 	if (!locks_in_grace(net) && lock->lk_reclaim)
6725 		goto out;
6726 
6727 	fp = lock_stp->st_stid.sc_file;
6728 	switch (lock->lk_type) {
6729 		case NFS4_READW_LT:
6730 			if (nfsd4_has_session(cstate))
6731 				fl_flags |= FL_SLEEP;
6732 			fallthrough;
6733 		case NFS4_READ_LT:
6734 			spin_lock(&fp->fi_lock);
6735 			nf = find_readable_file_locked(fp);
6736 			if (nf)
6737 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6738 			spin_unlock(&fp->fi_lock);
6739 			fl_type = F_RDLCK;
6740 			break;
6741 		case NFS4_WRITEW_LT:
6742 			if (nfsd4_has_session(cstate))
6743 				fl_flags |= FL_SLEEP;
6744 			fallthrough;
6745 		case NFS4_WRITE_LT:
6746 			spin_lock(&fp->fi_lock);
6747 			nf = find_writeable_file_locked(fp);
6748 			if (nf)
6749 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6750 			spin_unlock(&fp->fi_lock);
6751 			fl_type = F_WRLCK;
6752 			break;
6753 		default:
6754 			status = nfserr_inval;
6755 		goto out;
6756 	}
6757 
6758 	if (!nf) {
6759 		status = nfserr_openmode;
6760 		goto out;
6761 	}
6762 
6763 	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6764 	if (!nbl) {
6765 		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6766 		status = nfserr_jukebox;
6767 		goto out;
6768 	}
6769 
6770 	file_lock = &nbl->nbl_lock;
6771 	file_lock->fl_type = fl_type;
6772 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6773 	file_lock->fl_pid = current->tgid;
6774 	file_lock->fl_file = nf->nf_file;
6775 	file_lock->fl_flags = fl_flags;
6776 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6777 	file_lock->fl_start = lock->lk_offset;
6778 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6779 	nfs4_transform_lock_offset(file_lock);
6780 
6781 	conflock = locks_alloc_lock();
6782 	if (!conflock) {
6783 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6784 		status = nfserr_jukebox;
6785 		goto out;
6786 	}
6787 
6788 	if (fl_flags & FL_SLEEP) {
6789 		nbl->nbl_time = ktime_get_boottime_seconds();
6790 		spin_lock(&nn->blocked_locks_lock);
6791 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6792 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6793 		spin_unlock(&nn->blocked_locks_lock);
6794 	}
6795 
6796 	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6797 	switch (err) {
6798 	case 0: /* success! */
6799 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6800 		status = 0;
6801 		if (lock->lk_reclaim)
6802 			nn->somebody_reclaimed = true;
6803 		break;
6804 	case FILE_LOCK_DEFERRED:
6805 		nbl = NULL;
6806 		fallthrough;
6807 	case -EAGAIN:		/* conflock holds conflicting lock */
6808 		status = nfserr_denied;
6809 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6810 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
6811 		break;
6812 	case -EDEADLK:
6813 		status = nfserr_deadlock;
6814 		break;
6815 	default:
6816 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6817 		status = nfserrno(err);
6818 		break;
6819 	}
6820 out:
6821 	if (nbl) {
6822 		/* dequeue it if we queued it before */
6823 		if (fl_flags & FL_SLEEP) {
6824 			spin_lock(&nn->blocked_locks_lock);
6825 			list_del_init(&nbl->nbl_list);
6826 			list_del_init(&nbl->nbl_lru);
6827 			spin_unlock(&nn->blocked_locks_lock);
6828 		}
6829 		free_blocked_lock(nbl);
6830 	}
6831 	if (nf)
6832 		nfsd_file_put(nf);
6833 	if (lock_stp) {
6834 		/* Bump seqid manually if the 4.0 replay owner is openowner */
6835 		if (cstate->replay_owner &&
6836 		    cstate->replay_owner != &lock_sop->lo_owner &&
6837 		    seqid_mutating_err(ntohl(status)))
6838 			lock_sop->lo_owner.so_seqid++;
6839 
6840 		/*
6841 		 * If this is a new, never-before-used stateid, and we are
6842 		 * returning an error, then just go ahead and release it.
6843 		 */
6844 		if (status && new)
6845 			release_lock_stateid(lock_stp);
6846 
6847 		mutex_unlock(&lock_stp->st_mutex);
6848 
6849 		nfs4_put_stid(&lock_stp->st_stid);
6850 	}
6851 	if (open_stp)
6852 		nfs4_put_stid(&open_stp->st_stid);
6853 	nfsd4_bump_seqid(cstate, status);
6854 	if (conflock)
6855 		locks_free_lock(conflock);
6856 	return status;
6857 }
6858 
6859 /*
6860  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6861  * so we do a temporary open here just to get an open file to pass to
6862  * vfs_test_lock.  (Arguably, test_lock should be done with an
6863  * inode operation.)
6864  */
6865 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6866 {
6867 	struct nfsd_file *nf;
6868 	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
6869 	if (!err) {
6870 		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
6871 		nfsd_file_put(nf);
6872 	}
6873 	return err;
6874 }
6875 
6876 /*
6877  * LOCKT operation
6878  */
6879 __be32
6880 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6881 	    union nfsd4_op_u *u)
6882 {
6883 	struct nfsd4_lockt *lockt = &u->lockt;
6884 	struct file_lock *file_lock = NULL;
6885 	struct nfs4_lockowner *lo = NULL;
6886 	__be32 status;
6887 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6888 
6889 	if (locks_in_grace(SVC_NET(rqstp)))
6890 		return nfserr_grace;
6891 
6892 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6893 		return nfserr_inval;
6894 
6895 	if (!nfsd4_has_session(cstate)) {
6896 		status = set_client(&lockt->lt_clientid, cstate, nn);
6897 		if (status)
6898 			goto out;
6899 	}
6900 
6901 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6902 		goto out;
6903 
6904 	file_lock = locks_alloc_lock();
6905 	if (!file_lock) {
6906 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6907 		status = nfserr_jukebox;
6908 		goto out;
6909 	}
6910 
6911 	switch (lockt->lt_type) {
6912 		case NFS4_READ_LT:
6913 		case NFS4_READW_LT:
6914 			file_lock->fl_type = F_RDLCK;
6915 			break;
6916 		case NFS4_WRITE_LT:
6917 		case NFS4_WRITEW_LT:
6918 			file_lock->fl_type = F_WRLCK;
6919 			break;
6920 		default:
6921 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6922 			status = nfserr_inval;
6923 			goto out;
6924 	}
6925 
6926 	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6927 	if (lo)
6928 		file_lock->fl_owner = (fl_owner_t)lo;
6929 	file_lock->fl_pid = current->tgid;
6930 	file_lock->fl_flags = FL_POSIX;
6931 
6932 	file_lock->fl_start = lockt->lt_offset;
6933 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6934 
6935 	nfs4_transform_lock_offset(file_lock);
6936 
6937 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6938 	if (status)
6939 		goto out;
6940 
6941 	if (file_lock->fl_type != F_UNLCK) {
6942 		status = nfserr_denied;
6943 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6944 	}
6945 out:
6946 	if (lo)
6947 		nfs4_put_stateowner(&lo->lo_owner);
6948 	if (file_lock)
6949 		locks_free_lock(file_lock);
6950 	return status;
6951 }
6952 
6953 __be32
6954 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6955 	    union nfsd4_op_u *u)
6956 {
6957 	struct nfsd4_locku *locku = &u->locku;
6958 	struct nfs4_ol_stateid *stp;
6959 	struct nfsd_file *nf = NULL;
6960 	struct file_lock *file_lock = NULL;
6961 	__be32 status;
6962 	int err;
6963 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6964 
6965 	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
6966 		(long long) locku->lu_offset,
6967 		(long long) locku->lu_length);
6968 
6969 	if (check_lock_length(locku->lu_offset, locku->lu_length))
6970 		return nfserr_inval;
6971 
6972 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6973 					&locku->lu_stateid, NFS4_LOCK_STID,
6974 					&stp, nn);
6975 	if (status)
6976 		goto out;
6977 	nf = find_any_file(stp->st_stid.sc_file);
6978 	if (!nf) {
6979 		status = nfserr_lock_range;
6980 		goto put_stateid;
6981 	}
6982 	file_lock = locks_alloc_lock();
6983 	if (!file_lock) {
6984 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6985 		status = nfserr_jukebox;
6986 		goto put_file;
6987 	}
6988 
6989 	file_lock->fl_type = F_UNLCK;
6990 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6991 	file_lock->fl_pid = current->tgid;
6992 	file_lock->fl_file = nf->nf_file;
6993 	file_lock->fl_flags = FL_POSIX;
6994 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6995 	file_lock->fl_start = locku->lu_offset;
6996 
6997 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
6998 						locku->lu_length);
6999 	nfs4_transform_lock_offset(file_lock);
7000 
7001 	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7002 	if (err) {
7003 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7004 		goto out_nfserr;
7005 	}
7006 	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7007 put_file:
7008 	nfsd_file_put(nf);
7009 put_stateid:
7010 	mutex_unlock(&stp->st_mutex);
7011 	nfs4_put_stid(&stp->st_stid);
7012 out:
7013 	nfsd4_bump_seqid(cstate, status);
7014 	if (file_lock)
7015 		locks_free_lock(file_lock);
7016 	return status;
7017 
7018 out_nfserr:
7019 	status = nfserrno(err);
7020 	goto put_file;
7021 }
7022 
7023 /*
7024  * returns
7025  * 	true:  locks held by lockowner
7026  * 	false: no locks held by lockowner
7027  */
7028 static bool
7029 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7030 {
7031 	struct file_lock *fl;
7032 	int status = false;
7033 	struct nfsd_file *nf = find_any_file(fp);
7034 	struct inode *inode;
7035 	struct file_lock_context *flctx;
7036 
7037 	if (!nf) {
7038 		/* Any valid lock stateid should have some sort of access */
7039 		WARN_ON_ONCE(1);
7040 		return status;
7041 	}
7042 
7043 	inode = locks_inode(nf->nf_file);
7044 	flctx = inode->i_flctx;
7045 
7046 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7047 		spin_lock(&flctx->flc_lock);
7048 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7049 			if (fl->fl_owner == (fl_owner_t)lowner) {
7050 				status = true;
7051 				break;
7052 			}
7053 		}
7054 		spin_unlock(&flctx->flc_lock);
7055 	}
7056 	nfsd_file_put(nf);
7057 	return status;
7058 }
7059 
7060 __be32
7061 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7062 			struct nfsd4_compound_state *cstate,
7063 			union nfsd4_op_u *u)
7064 {
7065 	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7066 	clientid_t *clid = &rlockowner->rl_clientid;
7067 	struct nfs4_stateowner *sop;
7068 	struct nfs4_lockowner *lo = NULL;
7069 	struct nfs4_ol_stateid *stp;
7070 	struct xdr_netobj *owner = &rlockowner->rl_owner;
7071 	unsigned int hashval = ownerstr_hashval(owner);
7072 	__be32 status;
7073 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7074 	struct nfs4_client *clp;
7075 	LIST_HEAD(reaplist);
7076 
7077 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7078 		clid->cl_boot, clid->cl_id);
7079 
7080 	status = set_client(clid, cstate, nn);
7081 	if (status)
7082 		return status;
7083 
7084 	clp = cstate->clp;
7085 	/* Find the matching lock stateowner */
7086 	spin_lock(&clp->cl_lock);
7087 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
7088 			    so_strhash) {
7089 
7090 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
7091 			continue;
7092 
7093 		/* see if there are still any locks associated with it */
7094 		lo = lockowner(sop);
7095 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
7096 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
7097 				status = nfserr_locks_held;
7098 				spin_unlock(&clp->cl_lock);
7099 				return status;
7100 			}
7101 		}
7102 
7103 		nfs4_get_stateowner(sop);
7104 		break;
7105 	}
7106 	if (!lo) {
7107 		spin_unlock(&clp->cl_lock);
7108 		return status;
7109 	}
7110 
7111 	unhash_lockowner_locked(lo);
7112 	while (!list_empty(&lo->lo_owner.so_stateids)) {
7113 		stp = list_first_entry(&lo->lo_owner.so_stateids,
7114 				       struct nfs4_ol_stateid,
7115 				       st_perstateowner);
7116 		WARN_ON(!unhash_lock_stateid(stp));
7117 		put_ol_stateid_locked(stp, &reaplist);
7118 	}
7119 	spin_unlock(&clp->cl_lock);
7120 	free_ol_stateid_reaplist(&reaplist);
7121 	remove_blocked_locks(lo);
7122 	nfs4_put_stateowner(&lo->lo_owner);
7123 
7124 	return status;
7125 }
7126 
7127 static inline struct nfs4_client_reclaim *
7128 alloc_reclaim(void)
7129 {
7130 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7131 }
7132 
7133 bool
7134 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7135 {
7136 	struct nfs4_client_reclaim *crp;
7137 
7138 	crp = nfsd4_find_reclaim_client(name, nn);
7139 	return (crp && crp->cr_clp);
7140 }
7141 
7142 /*
7143  * failure => all reset bets are off, nfserr_no_grace...
7144  *
7145  * The caller is responsible for freeing name.data if NULL is returned (it
7146  * will be freed in nfs4_remove_reclaim_record in the normal case).
7147  */
7148 struct nfs4_client_reclaim *
7149 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7150 		struct nfsd_net *nn)
7151 {
7152 	unsigned int strhashval;
7153 	struct nfs4_client_reclaim *crp;
7154 
7155 	trace_nfsd_clid_reclaim(nn, name.len, name.data);
7156 	crp = alloc_reclaim();
7157 	if (crp) {
7158 		strhashval = clientstr_hashval(name);
7159 		INIT_LIST_HEAD(&crp->cr_strhash);
7160 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7161 		crp->cr_name.data = name.data;
7162 		crp->cr_name.len = name.len;
7163 		crp->cr_princhash.data = princhash.data;
7164 		crp->cr_princhash.len = princhash.len;
7165 		crp->cr_clp = NULL;
7166 		nn->reclaim_str_hashtbl_size++;
7167 	}
7168 	return crp;
7169 }
7170 
7171 void
7172 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7173 {
7174 	list_del(&crp->cr_strhash);
7175 	kfree(crp->cr_name.data);
7176 	kfree(crp->cr_princhash.data);
7177 	kfree(crp);
7178 	nn->reclaim_str_hashtbl_size--;
7179 }
7180 
7181 void
7182 nfs4_release_reclaim(struct nfsd_net *nn)
7183 {
7184 	struct nfs4_client_reclaim *crp = NULL;
7185 	int i;
7186 
7187 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7188 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7189 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7190 			                struct nfs4_client_reclaim, cr_strhash);
7191 			nfs4_remove_reclaim_record(crp, nn);
7192 		}
7193 	}
7194 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7195 }
7196 
7197 /*
7198  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
7199 struct nfs4_client_reclaim *
7200 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7201 {
7202 	unsigned int strhashval;
7203 	struct nfs4_client_reclaim *crp = NULL;
7204 
7205 	trace_nfsd_clid_find(nn, name.len, name.data);
7206 
7207 	strhashval = clientstr_hashval(name);
7208 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7209 		if (compare_blob(&crp->cr_name, &name) == 0) {
7210 			return crp;
7211 		}
7212 	}
7213 	return NULL;
7214 }
7215 
7216 __be32
7217 nfs4_check_open_reclaim(struct nfs4_client *clp)
7218 {
7219 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
7220 		return nfserr_no_grace;
7221 
7222 	if (nfsd4_client_record_check(clp))
7223 		return nfserr_reclaim_bad;
7224 
7225 	return nfs_ok;
7226 }
7227 
7228 /*
7229  * Since the lifetime of a delegation isn't limited to that of an open, a
7230  * client may quite reasonably hang on to a delegation as long as it has
7231  * the inode cached.  This becomes an obvious problem the first time a
7232  * client's inode cache approaches the size of the server's total memory.
7233  *
7234  * For now we avoid this problem by imposing a hard limit on the number
7235  * of delegations, which varies according to the server's memory size.
7236  */
7237 static void
7238 set_max_delegations(void)
7239 {
7240 	/*
7241 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
7242 	 * estimates suggest that in the worst case (where every delegation
7243 	 * is for a different inode), a delegation could take about 1.5K,
7244 	 * giving a worst case usage of about 6% of memory.
7245 	 */
7246 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7247 }
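
/*
 * The shift above encodes "4 delegations per megabyte of RAM" in page
 * units: pages-to-bytes is << PAGE_SHIFT, bytes-to-megabytes is >> 20,
 * and 4-per-megabyte is << 2, giving a net shift of (20 - 2 - PAGE_SHIFT).
 * With 4K pages that is nr_free_buffer_pages() >> 6, i.e. one delegation
 * allowed per 64 free pages.
 */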
7248 
7249 static int nfs4_state_create_net(struct net *net)
7250 {
7251 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7252 	int i;
7253 
7254 	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7255 					    sizeof(struct list_head),
7256 					    GFP_KERNEL);
7257 	if (!nn->conf_id_hashtbl)
7258 		goto err;
7259 	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7260 					      sizeof(struct list_head),
7261 					      GFP_KERNEL);
7262 	if (!nn->unconf_id_hashtbl)
7263 		goto err_unconf_id;
7264 	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7265 					      sizeof(struct list_head),
7266 					      GFP_KERNEL);
7267 	if (!nn->sessionid_hashtbl)
7268 		goto err_sessionid;
7269 
7270 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7271 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7272 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7273 	}
7274 	for (i = 0; i < SESSION_HASH_SIZE; i++)
7275 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7276 	nn->conf_name_tree = RB_ROOT;
7277 	nn->unconf_name_tree = RB_ROOT;
7278 	nn->boot_time = ktime_get_real_seconds();
7279 	nn->grace_ended = false;
7280 	nn->nfsd4_manager.block_opens = true;
7281 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7282 	INIT_LIST_HEAD(&nn->client_lru);
7283 	INIT_LIST_HEAD(&nn->close_lru);
7284 	INIT_LIST_HEAD(&nn->del_recall_lru);
7285 	spin_lock_init(&nn->client_lock);
7286 	spin_lock_init(&nn->s2s_cp_lock);
7287 	idr_init(&nn->s2s_cp_stateids);
7288 
7289 	spin_lock_init(&nn->blocked_locks_lock);
7290 	INIT_LIST_HEAD(&nn->blocked_locks_lru);
7291 
7292 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7293 	get_net(net);
7294 
7295 	return 0;
7296 
7297 err_sessionid:
7298 	kfree(nn->unconf_id_hashtbl);
7299 err_unconf_id:
7300 	kfree(nn->conf_id_hashtbl);
7301 err:
7302 	return -ENOMEM;
7303 }
7304 
7305 static void
7306 nfs4_state_destroy_net(struct net *net)
7307 {
7308 	int i;
7309 	struct nfs4_client *clp = NULL;
7310 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7311 
7312 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7313 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
7314 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7315 			destroy_client(clp);
7316 		}
7317 	}
7318 
7319 	WARN_ON(!list_empty(&nn->blocked_locks_lru));
7320 
7321 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7322 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7323 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7324 			destroy_client(clp);
7325 		}
7326 	}
7327 
7328 	kfree(nn->sessionid_hashtbl);
7329 	kfree(nn->unconf_id_hashtbl);
7330 	kfree(nn->conf_id_hashtbl);
7331 	put_net(net);
7332 }
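
/*
 * Note on the drain loops above: destroy_client() unlinks the client
 * from cl_idhash itself, so each pass re-reads the (now shorter) list
 * head until it is empty.  The same idiom in isolation (illustrative
 * helper, not part of nfsd):
 */
static void __maybe_unused sample_drain(struct list_head *head)
{
	struct nfs4_client *clp;

	while (!list_empty(head)) {
		clp = list_first_entry(head, struct nfs4_client, cl_idhash);
		destroy_client(clp);	/* removes clp from *head */
	}
}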
7333 
7334 int
7335 nfs4_state_start_net(struct net *net)
7336 {
7337 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7338 	int ret;
7339 
7340 	ret = nfs4_state_create_net(net);
7341 	if (ret)
7342 		return ret;
7343 	locks_start_grace(net, &nn->nfsd4_manager);
7344 	nfsd4_client_tracking_init(net);
7345 	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7346 		goto skip_grace;
7347 	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
7348 	       nn->nfsd4_grace, net->ns.inum);
7349 	trace_nfsd_grace_start(nn);
7350 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7351 	return 0;
7352 
7353 skip_grace:
7354 	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7355 			net->ns.inum);
7356 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7357 	nfsd4_end_grace(nn);
7358 	return 0;
7359 }
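
/*
 * Note on the delays above: queue_delayed_work() takes jiffies, so the
 * grace and lease times (kept in seconds) are scaled by HZ.  For
 * example (hypothetical values), a 90-second grace period on an HZ=250
 * kernel queues the laundromat 22500 jiffies out.
 */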
7360 
7361 /* initialization to perform when the nfsd service is started: */
7362 
7363 int
7364 nfs4_state_start(void)
7365 {
7366 	int ret;
7367 
7368 	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7369 	if (laundry_wq == NULL) {
7370 		ret = -ENOMEM;
7371 		goto out;
7372 	}
7373 	ret = nfsd4_create_callback_queue();
7374 	if (ret)
7375 		goto out_free_laundry;
7376 
7377 	set_max_delegations();
7378 	return 0;
7379 
7380 out_free_laundry:
7381 	destroy_workqueue(laundry_wq);
7382 out:
7383 	return ret;
7384 }
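
/*
 * Note on alloc_workqueue() above: its first argument is a printf-style
 * format for the workqueue name, so "%s" plus the trailing "nfsd4"
 * argument yields a workqueue named "nfsd4".  WQ_UNBOUND suits the
 * laundromat: it is long-running housekeeping work with nothing to gain
 * from CPU locality.
 */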
7385 
7386 void
7387 nfs4_state_shutdown_net(struct net *net)
7388 {
7389 	struct nfs4_delegation *dp = NULL;
7390 	struct list_head *pos, *next, reaplist;
7391 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7392 
7393 	cancel_delayed_work_sync(&nn->laundromat_work);
7394 	locks_end_grace(&nn->nfsd4_manager);
7395 
7396 	INIT_LIST_HEAD(&reaplist);
7397 	spin_lock(&state_lock);
7398 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
7399 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7400 		WARN_ON(!unhash_delegation_locked(dp));
7401 		list_add(&dp->dl_recall_lru, &reaplist);
7402 	}
7403 	spin_unlock(&state_lock);
7404 	list_for_each_safe(pos, next, &reaplist) {
7405 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7406 		list_del_init(&dp->dl_recall_lru);
7407 		destroy_unhashed_deleg(dp);
7408 	}
7409 
7410 	nfsd4_client_tracking_exit(net);
7411 	nfs4_state_destroy_net(net);
7412 }
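
/*
 * The reaplist above is the standard two-phase teardown: move entries
 * onto a private list while holding the spinlock, then perform the
 * potentially sleeping destruction after dropping it.  A condensed
 * sketch (illustrative; the real loop also checks the return value of
 * unhash_delegation_locked()):
 */
static void __maybe_unused sample_reap(spinlock_t *lock, struct list_head *src)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(reaplist);

	spin_lock(lock);
	list_for_each_entry_safe(dp, next, src, dl_recall_lru)
		list_move(&dp->dl_recall_lru, &reaplist);	/* no sleeping here */
	spin_unlock(lock);

	list_for_each_entry_safe(dp, next, &reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);	/* may sleep */
	}
}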
7413 
7414 void
7415 nfs4_state_shutdown(void)
7416 {
7417 	destroy_workqueue(laundry_wq);
7418 	nfsd4_destroy_callback_queue();
7419 }
7420 
7421 static void
7422 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7423 {
7424 	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
7425 	    CURRENT_STATEID(stateid))
7426 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7427 }
7428 
7429 static void
7430 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7431 {
7432 	if (cstate->minorversion) {
7433 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7434 		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7435 	}
7436 }
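
/*
 * Walk-through (hypothetical compound): in a v4.1 OPEN; READ compound,
 * put_stateid() saves the OPEN reply's stateid as the current stateid
 * and sets CURRENT_STATE_ID_FLAG.  If the READ then carries the
 * reserved value matched by CURRENT_STATEID(), get_stateid() replaces
 * it with the saved stateid before the READ is processed.  put_stateid()
 * only records the value for minor versions >= 1, since the
 * current-stateid facility is defined by NFSv4.1.
 */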
7437 
7438 void
7439 clear_current_stateid(struct nfsd4_compound_state *cstate)
7440 {
7441 	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7442 }
7443 
7444 /*
7445  * functions to set the current stateid
7446  */
7447 void
7448 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7449 		union nfsd4_op_u *u)
7450 {
7451 	put_stateid(cstate, &u->open_downgrade.od_stateid);
7452 }
7453 
7454 void
7455 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7456 		union nfsd4_op_u *u)
7457 {
7458 	put_stateid(cstate, &u->open.op_stateid);
7459 }
7460 
7461 void
7462 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7463 		union nfsd4_op_u *u)
7464 {
7465 	put_stateid(cstate, &u->close.cl_stateid);
7466 }
7467 
7468 void
7469 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7470 		union nfsd4_op_u *u)
7471 {
7472 	put_stateid(cstate, &u->lock.lk_resp_stateid);
7473 }
7474 
7475 /*
7476  * functions to consume the current stateid
7477  */
7478 
7479 void
7480 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7481 		union nfsd4_op_u *u)
7482 {
7483 	get_stateid(cstate, &u->open_downgrade.od_stateid);
7484 }
7485 
7486 void
7487 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7488 		union nfsd4_op_u *u)
7489 {
7490 	get_stateid(cstate, &u->delegreturn.dr_stateid);
7491 }
7492 
7493 void
7494 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7495 		union nfsd4_op_u *u)
7496 {
7497 	get_stateid(cstate, &u->free_stateid.fr_stateid);
7498 }
7499 
7500 void
7501 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7502 		union nfsd4_op_u *u)
7503 {
7504 	get_stateid(cstate, &u->setattr.sa_stateid);
7505 }
7506 
7507 void
7508 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7509 		union nfsd4_op_u *u)
7510 {
7511 	get_stateid(cstate, &u->close.cl_stateid);
7512 }
7513 
7514 void
7515 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7516 		union nfsd4_op_u *u)
7517 {
7518 	get_stateid(cstate, &u->locku.lu_stateid);
7519 }
7520 
7521 void
7522 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7523 		union nfsd4_op_u *u)
7524 {
7525 	get_stateid(cstate, &u->read.rd_stateid);
7526 }
7527 
7528 void
7529 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7530 		union nfsd4_op_u *u)
7531 {
7532 	get_stateid(cstate, &u->write.wr_stateid);
7533 }
7534
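
/*
 * How the set/get helpers above are consumed (sketch; the actual wiring
 * lives in the nfsd4 operation table in nfs4proc.c, whose field names
 * may differ): each stateid-bearing op points at its helpers, and the
 * compound processor calls the "get" hook before the op handler runs
 * and the "set" hook after the op succeeds.
 */
static const struct sample_op_hooks {
	void (*get_stateid)(struct nfsd4_compound_state *, union nfsd4_op_u *);
	void (*set_stateid)(struct nfsd4_compound_state *, union nfsd4_op_u *);
} sample_close_hooks __maybe_unused = {
	.get_stateid = nfsd4_get_closestateid,
	.set_stateid = nfsd4_set_closestateid,
};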