xref: /openbmc/linux/fs/nfsd/nfs4state.c (revision 09a4f6f5)
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include "xdr4.h"
47 #include "xdr4cb.h"
48 #include "vfs.h"
49 #include "current_stateid.h"
50 
51 #include "netns.h"
52 #include "pnfs.h"
53 #include "filecache.h"
54 #include "trace.h"
55 
56 #define NFSDDBG_FACILITY                NFSDDBG_PROC
57 
58 #define all_ones {{~0,~0},~0}
59 static const stateid_t one_stateid = {
60 	.si_generation = ~0,
61 	.si_opaque = all_ones,
62 };
63 static const stateid_t zero_stateid = {
64 	/* all fields zero */
65 };
66 static const stateid_t currentstateid = {
67 	.si_generation = 1,
68 };
69 static const stateid_t close_stateid = {
70 	.si_generation = 0xffffffffU,
71 };
72 
73 static u64 current_sessionid = 1;
74 
75 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
76 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
77 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
78 #define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
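/*
 * The zero and one stateids above correspond to the NFSv4 "special"
 * stateids: an all-zero stateid requests anonymous access, and an
 * all-ones stateid bypasses READ locking checks.  The currentstateid
 * pattern (seqid 1, all other fields zero) is the v4.1 indicator for
 * "use the current stateid", and seqid all-ones with a zero "other"
 * field (close_stateid) is likewise one of the reserved stateid
 * combinations in v4.1 (RFC 5661, Section 8.2.3).
 */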
79 
80 /* forward declarations */
81 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
82 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
83 void nfsd4_end_grace(struct nfsd_net *nn);
84 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
85 
86 /* Locking: */
87 
88 /*
89  * Currently used for the del_recall_lru and file hash table.  In an
90  * effort to decrease the scope of the client_mutex, this spinlock may
91  * eventually cover more:
92  */
93 static DEFINE_SPINLOCK(state_lock);
94 
95 enum nfsd4_st_mutex_lock_subclass {
96 	OPEN_STATEID_MUTEX = 0,
97 	LOCK_STATEID_MUTEX = 1,
98 };
99 
100 /*
101  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
102  * the refcount on the open stateid to drop.
103  */
104 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
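/*
 * (nfs4_put_stid() and put_ol_stateid_locked() below wake close_wq
 * whenever they drop a stateid reference that is not the last one.)
 */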
105 
106 /*
107  * A waitqueue where a writer to clients/#/ctl destroying a client can
108  * wait for cl_rpc_users to drop to 0 and then for the client to be
109  * unhashed.
110  */
111 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
112 
113 static struct kmem_cache *client_slab;
114 static struct kmem_cache *openowner_slab;
115 static struct kmem_cache *lockowner_slab;
116 static struct kmem_cache *file_slab;
117 static struct kmem_cache *stateid_slab;
118 static struct kmem_cache *deleg_slab;
119 static struct kmem_cache *odstate_slab;
120 
121 static void free_session(struct nfsd4_session *);
122 
123 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
124 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
125 
126 static bool is_session_dead(struct nfsd4_session *ses)
127 {
128 	return ses->se_flags & NFS4_SESSION_DEAD;
129 }
130 
131 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
132 {
133 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
134 		return nfserr_jukebox;
135 	ses->se_flags |= NFS4_SESSION_DEAD;
136 	return nfs_ok;
137 }
138 
139 static bool is_client_expired(struct nfs4_client *clp)
140 {
141 	return clp->cl_time == 0;
142 }
143 
144 static __be32 get_client_locked(struct nfs4_client *clp)
145 {
146 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
147 
148 	lockdep_assert_held(&nn->client_lock);
149 
150 	if (is_client_expired(clp))
151 		return nfserr_expired;
152 	atomic_inc(&clp->cl_rpc_users);
153 	return nfs_ok;
154 }
155 
156 /* must be called under the client_lock */
157 static inline void
158 renew_client_locked(struct nfs4_client *clp)
159 {
160 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
161 
162 	if (is_client_expired(clp)) {
163 		WARN_ON(1);
164 		printk("%s: client (clientid %08x/%08x) already expired\n",
165 			__func__,
166 			clp->cl_clientid.cl_boot,
167 			clp->cl_clientid.cl_id);
168 		return;
169 	}
170 
171 	list_move_tail(&clp->cl_lru, &nn->client_lru);
172 	clp->cl_time = ktime_get_boottime_seconds();
173 }
174 
175 static void put_client_renew_locked(struct nfs4_client *clp)
176 {
177 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
178 
179 	lockdep_assert_held(&nn->client_lock);
180 
181 	if (!atomic_dec_and_test(&clp->cl_rpc_users))
182 		return;
183 	if (!is_client_expired(clp))
184 		renew_client_locked(clp);
185 	else
186 		wake_up_all(&expiry_wq);
187 }
188 
189 static void put_client_renew(struct nfs4_client *clp)
190 {
191 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
192 
193 	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
194 		return;
195 	if (!is_client_expired(clp))
196 		renew_client_locked(clp);
197 	else
198 		wake_up_all(&expiry_wq);
199 	spin_unlock(&nn->client_lock);
200 }
201 
202 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
203 {
204 	__be32 status;
205 
206 	if (is_session_dead(ses))
207 		return nfserr_badsession;
208 	status = get_client_locked(ses->se_client);
209 	if (status)
210 		return status;
211 	atomic_inc(&ses->se_ref);
212 	return nfs_ok;
213 }
214 
215 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
216 {
217 	struct nfs4_client *clp = ses->se_client;
218 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
219 
220 	lockdep_assert_held(&nn->client_lock);
221 
222 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
223 		free_session(ses);
224 	put_client_renew_locked(clp);
225 }
226 
227 static void nfsd4_put_session(struct nfsd4_session *ses)
228 {
229 	struct nfs4_client *clp = ses->se_client;
230 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
231 
232 	spin_lock(&nn->client_lock);
233 	nfsd4_put_session_locked(ses);
234 	spin_unlock(&nn->client_lock);
235 }
236 
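/*
 * Find and dequeue a blocked lock matching @fh on @lo's list, cancel
 * its pending file_lock block, and return it; returns NULL if no
 * matching blocked lock is queued.
 */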
237 static struct nfsd4_blocked_lock *
238 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
239 			struct nfsd_net *nn)
240 {
241 	struct nfsd4_blocked_lock *cur, *found = NULL;
242 
243 	spin_lock(&nn->blocked_locks_lock);
244 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
245 		if (fh_match(fh, &cur->nbl_fh)) {
246 			list_del_init(&cur->nbl_list);
247 			list_del_init(&cur->nbl_lru);
248 			found = cur;
249 			break;
250 		}
251 	}
252 	spin_unlock(&nn->blocked_locks_lock);
253 	if (found)
254 		locks_delete_block(&found->nbl_lock);
255 	return found;
256 }
257 
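/*
 * Return an existing blocked lock for this lockowner/filehandle pair,
 * or allocate and initialize a new one.  May return NULL if the
 * allocation fails.
 */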
258 static struct nfsd4_blocked_lock *
259 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
260 			struct nfsd_net *nn)
261 {
262 	struct nfsd4_blocked_lock *nbl;
263 
264 	nbl = find_blocked_lock(lo, fh, nn);
265 	if (!nbl) {
266 		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
267 		if (nbl) {
268 			INIT_LIST_HEAD(&nbl->nbl_list);
269 			INIT_LIST_HEAD(&nbl->nbl_lru);
270 			fh_copy_shallow(&nbl->nbl_fh, fh);
271 			locks_init_lock(&nbl->nbl_lock);
272 			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
273 					&nfsd4_cb_notify_lock_ops,
274 					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
275 		}
276 	}
277 	return nbl;
278 }
279 
280 static void
281 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
282 {
283 	locks_delete_block(&nbl->nbl_lock);
284 	locks_release_private(&nbl->nbl_lock);
285 	kfree(nbl);
286 }
287 
288 static void
289 remove_blocked_locks(struct nfs4_lockowner *lo)
290 {
291 	struct nfs4_client *clp = lo->lo_owner.so_client;
292 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
293 	struct nfsd4_blocked_lock *nbl;
294 	LIST_HEAD(reaplist);
295 
296 	/* Dequeue all blocked locks */
297 	spin_lock(&nn->blocked_locks_lock);
298 	while (!list_empty(&lo->lo_blocked)) {
299 		nbl = list_first_entry(&lo->lo_blocked,
300 					struct nfsd4_blocked_lock,
301 					nbl_list);
302 		list_del_init(&nbl->nbl_list);
303 		list_move(&nbl->nbl_lru, &reaplist);
304 	}
305 	spin_unlock(&nn->blocked_locks_lock);
306 
307 	/* Now free them */
308 	while (!list_empty(&reaplist)) {
309 		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
310 					nbl_lru);
311 		list_del_init(&nbl->nbl_lru);
312 		free_blocked_lock(nbl);
313 	}
314 }
315 
316 static void
317 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
318 {
319 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
320 						struct nfsd4_blocked_lock, nbl_cb);
321 	locks_delete_block(&nbl->nbl_lock);
322 }
323 
324 static int
325 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
326 {
327 	/*
328 	 * Since this is just an optimization, we don't try very hard if it
329 	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
330 	 * just quit trying on anything else.
331 	 */
332 	switch (task->tk_status) {
333 	case -NFS4ERR_DELAY:
334 		rpc_delay(task, 1 * HZ);
335 		return 0;
336 	default:
337 		return 1;
338 	}
339 }
340 
341 static void
342 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
343 {
344 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
345 						struct nfsd4_blocked_lock, nbl_cb);
346 
347 	free_blocked_lock(nbl);
348 }
349 
350 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
351 	.prepare	= nfsd4_cb_notify_lock_prepare,
352 	.done		= nfsd4_cb_notify_lock_done,
353 	.release	= nfsd4_cb_notify_lock_release,
354 };
355 
356 static inline struct nfs4_stateowner *
357 nfs4_get_stateowner(struct nfs4_stateowner *sop)
358 {
359 	atomic_inc(&sop->so_count);
360 	return sop;
361 }
362 
363 static int
364 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
365 {
366 	return (sop->so_owner.len == owner->len) &&
367 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
368 }
369 
370 static struct nfs4_openowner *
371 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
372 			struct nfs4_client *clp)
373 {
374 	struct nfs4_stateowner *so;
375 
376 	lockdep_assert_held(&clp->cl_lock);
377 
378 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
379 			    so_strhash) {
380 		if (!so->so_is_open_owner)
381 			continue;
382 		if (same_owner_str(so, &open->op_owner))
383 			return openowner(nfs4_get_stateowner(so));
384 	}
385 	return NULL;
386 }
387 
388 static struct nfs4_openowner *
389 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
390 			struct nfs4_client *clp)
391 {
392 	struct nfs4_openowner *oo;
393 
394 	spin_lock(&clp->cl_lock);
395 	oo = find_openstateowner_str_locked(hashval, open, clp);
396 	spin_unlock(&clp->cl_lock);
397 	return oo;
398 }
399 
400 static inline u32
401 opaque_hashval(const void *ptr, int nbytes)
402 {
403 	unsigned char *cptr = (unsigned char *) ptr;
404 
405 	u32 x = 0;
406 	while (nbytes--) {
407 		x *= 37;
408 		x += *cptr++;
409 	}
410 	return x;
411 }
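/*
 * Worked example: a two-byte owner string { 0x01, 0x02 } hashes to
 * (0 * 37 + 0x01) * 37 + 0x02 = 39, which ownerstr_hashval() below
 * then masks with OWNER_HASH_MASK.
 */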
412 
413 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
414 {
415 	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
416 
417 	kmem_cache_free(file_slab, fp);
418 }
419 
420 void
421 put_nfs4_file(struct nfs4_file *fi)
422 {
423 	might_lock(&state_lock);
424 
425 	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
426 		hlist_del_rcu(&fi->fi_hash);
427 		spin_unlock(&state_lock);
428 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
429 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
430 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
431 	}
432 }
433 
434 static struct nfsd_file *
435 __nfs4_get_fd(struct nfs4_file *f, int oflag)
436 {
437 	if (f->fi_fds[oflag])
438 		return nfsd_file_get(f->fi_fds[oflag]);
439 	return NULL;
440 }
441 
442 static struct nfsd_file *
443 find_writeable_file_locked(struct nfs4_file *f)
444 {
445 	struct nfsd_file *ret;
446 
447 	lockdep_assert_held(&f->fi_lock);
448 
449 	ret = __nfs4_get_fd(f, O_WRONLY);
450 	if (!ret)
451 		ret = __nfs4_get_fd(f, O_RDWR);
452 	return ret;
453 }
454 
455 static struct nfsd_file *
456 find_writeable_file(struct nfs4_file *f)
457 {
458 	struct nfsd_file *ret;
459 
460 	spin_lock(&f->fi_lock);
461 	ret = find_writeable_file_locked(f);
462 	spin_unlock(&f->fi_lock);
463 
464 	return ret;
465 }
466 
467 static struct nfsd_file *
468 find_readable_file_locked(struct nfs4_file *f)
469 {
470 	struct nfsd_file *ret;
471 
472 	lockdep_assert_held(&f->fi_lock);
473 
474 	ret = __nfs4_get_fd(f, O_RDONLY);
475 	if (!ret)
476 		ret = __nfs4_get_fd(f, O_RDWR);
477 	return ret;
478 }
479 
480 static struct nfsd_file *
481 find_readable_file(struct nfs4_file *f)
482 {
483 	struct nfsd_file *ret;
484 
485 	spin_lock(&f->fi_lock);
486 	ret = find_readable_file_locked(f);
487 	spin_unlock(&f->fi_lock);
488 
489 	return ret;
490 }
491 
492 struct nfsd_file *
493 find_any_file(struct nfs4_file *f)
494 {
495 	struct nfsd_file *ret;
496 
497 	if (!f)
498 		return NULL;
499 	spin_lock(&f->fi_lock);
500 	ret = __nfs4_get_fd(f, O_RDWR);
501 	if (!ret) {
502 		ret = __nfs4_get_fd(f, O_WRONLY);
503 		if (!ret)
504 			ret = __nfs4_get_fd(f, O_RDONLY);
505 	}
506 	spin_unlock(&f->fi_lock);
507 	return ret;
508 }
509 
510 static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
511 {
512 	struct nfsd_file *ret = NULL;
513 
514 	spin_lock(&f->fi_lock);
515 	if (f->fi_deleg_file)
516 		ret = nfsd_file_get(f->fi_deleg_file);
517 	spin_unlock(&f->fi_lock);
518 	return ret;
519 }
520 
521 static atomic_long_t num_delegations;
522 unsigned long max_delegations;
523 
524 /*
525  * Open owner state (share locks)
526  */
527 
528 /* hash tables for lock and open owners */
529 #define OWNER_HASH_BITS              8
530 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
531 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
532 
533 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
534 {
535 	unsigned int ret;
536 
537 	ret = opaque_hashval(ownername->data, ownername->len);
538 	return ret & OWNER_HASH_MASK;
539 }
540 
541 /* hash table for nfs4_file */
542 #define FILE_HASH_BITS                   8
543 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
544 
545 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
546 {
547 	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
548 }
549 
550 static unsigned int file_hashval(struct knfsd_fh *fh)
551 {
552 	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
553 }
554 
555 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
556 
557 static void
558 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
559 {
560 	lockdep_assert_held(&fp->fi_lock);
561 
562 	if (access & NFS4_SHARE_ACCESS_WRITE)
563 		atomic_inc(&fp->fi_access[O_WRONLY]);
564 	if (access & NFS4_SHARE_ACCESS_READ)
565 		atomic_inc(&fp->fi_access[O_RDONLY]);
566 }
567 
568 static __be32
569 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
570 {
571 	lockdep_assert_held(&fp->fi_lock);
572 
573 	/* Does this access mode make sense? */
574 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
575 		return nfserr_inval;
576 
577 	/* Does it conflict with a deny mode already set? */
578 	if ((access & fp->fi_share_deny) != 0)
579 		return nfserr_share_denied;
580 
581 	__nfs4_file_get_access(fp, access);
582 	return nfs_ok;
583 }
584 
585 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
586 {
587 	/* Common case is that there is no deny mode. */
588 	if (deny) {
589 		/* Does this deny mode make sense? */
590 		if (deny & ~NFS4_SHARE_DENY_BOTH)
591 			return nfserr_inval;
592 
593 		if ((deny & NFS4_SHARE_DENY_READ) &&
594 		    atomic_read(&fp->fi_access[O_RDONLY]))
595 			return nfserr_share_denied;
596 
597 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
598 		    atomic_read(&fp->fi_access[O_WRONLY]))
599 			return nfserr_share_denied;
600 	}
601 	return nfs_ok;
602 }
603 
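/*
 * Called when a share-access reference is dropped: once the last
 * reference for an open mode goes away, release the cached nfsd_file
 * for that mode, and release the O_RDWR file as well if the other
 * mode is no longer in use either.
 */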
604 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
605 {
606 	might_lock(&fp->fi_lock);
607 
608 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
609 		struct nfsd_file *f1 = NULL;
610 		struct nfsd_file *f2 = NULL;
611 
612 		swap(f1, fp->fi_fds[oflag]);
613 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
614 			swap(f2, fp->fi_fds[O_RDWR]);
615 		spin_unlock(&fp->fi_lock);
616 		if (f1)
617 			nfsd_file_put(f1);
618 		if (f2)
619 			nfsd_file_put(f2);
620 	}
621 }
622 
623 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
624 {
625 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
626 
627 	if (access & NFS4_SHARE_ACCESS_WRITE)
628 		__nfs4_file_put_access(fp, O_WRONLY);
629 	if (access & NFS4_SHARE_ACCESS_READ)
630 		__nfs4_file_put_access(fp, O_RDONLY);
631 }
632 
633 /*
634  * Allocate a new open/delegation state counter. This is needed for
635  * pNFS for proper return on close semantics.
636  *
637  * Note that we only allocate it for pNFS-enabled exports, otherwise
638  * all pointers to struct nfs4_clnt_odstate are always NULL.
639  */
640 static struct nfs4_clnt_odstate *
641 alloc_clnt_odstate(struct nfs4_client *clp)
642 {
643 	struct nfs4_clnt_odstate *co;
644 
645 	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
646 	if (co) {
647 		co->co_client = clp;
648 		refcount_set(&co->co_odcount, 1);
649 	}
650 	return co;
651 }
652 
653 static void
654 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
655 {
656 	struct nfs4_file *fp = co->co_file;
657 
658 	lockdep_assert_held(&fp->fi_lock);
659 	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
660 }
661 
662 static inline void
663 get_clnt_odstate(struct nfs4_clnt_odstate *co)
664 {
665 	if (co)
666 		refcount_inc(&co->co_odcount);
667 }
668 
669 static void
670 put_clnt_odstate(struct nfs4_clnt_odstate *co)
671 {
672 	struct nfs4_file *fp;
673 
674 	if (!co)
675 		return;
676 
677 	fp = co->co_file;
678 	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
679 		list_del(&co->co_perfile);
680 		spin_unlock(&fp->fi_lock);
681 
682 		nfsd4_return_all_file_layouts(co->co_client, fp);
683 		kmem_cache_free(odstate_slab, co);
684 	}
685 }
686 
687 static struct nfs4_clnt_odstate *
688 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
689 {
690 	struct nfs4_clnt_odstate *co;
691 	struct nfs4_client *cl;
692 
693 	if (!new)
694 		return NULL;
695 
696 	cl = new->co_client;
697 
698 	spin_lock(&fp->fi_lock);
699 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
700 		if (co->co_client == cl) {
701 			get_clnt_odstate(co);
702 			goto out;
703 		}
704 	}
705 	co = new;
706 	co->co_file = fp;
707 	hash_clnt_odstate_locked(new);
708 out:
709 	spin_unlock(&fp->fi_lock);
710 	return co;
711 }
712 
713 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
714 				  void (*sc_free)(struct nfs4_stid *))
715 {
716 	struct nfs4_stid *stid;
717 	int new_id;
718 
719 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
720 	if (!stid)
721 		return NULL;
722 
723 	idr_preload(GFP_KERNEL);
724 	spin_lock(&cl->cl_lock);
725 	/* Reserving 0 for start of file in nfsdfs "states" file: */
726 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
727 	spin_unlock(&cl->cl_lock);
728 	idr_preload_end();
729 	if (new_id < 0)
730 		goto out_free;
731 
732 	stid->sc_free = sc_free;
733 	stid->sc_client = cl;
734 	stid->sc_stateid.si_opaque.so_id = new_id;
735 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
736 	/* Will be incremented before return to client: */
737 	refcount_set(&stid->sc_count, 1);
738 	spin_lock_init(&stid->sc_lock);
739 	INIT_LIST_HEAD(&stid->sc_cp_list);
740 
741 	/*
742 	 * It shouldn't be a problem to reuse an opaque stateid value.
743 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
744 	 * example, a stray write retransmission could be accepted by
745 	 * the server when it should have been rejected.  Therefore,
746 	 * adopt a trick from the sctp code to attempt to maximize the
747 	 * amount of time until an id is reused, by ensuring they always
748 	 * "increase" (mod INT_MAX):
749 	 */
750 	return stid;
751 out_free:
752 	kmem_cache_free(slab, stid);
753 	return NULL;
754 }
755 
756 /*
757  * Create a unique stateid_t to represent each COPY.
758  */
759 static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
760 			      unsigned char sc_type)
761 {
762 	int new_id;
763 
764 	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
765 	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
766 	stid->sc_type = sc_type;
767 
768 	idr_preload(GFP_KERNEL);
769 	spin_lock(&nn->s2s_cp_lock);
770 	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
771 	stid->stid.si_opaque.so_id = new_id;
772 	stid->stid.si_generation = 1;
773 	spin_unlock(&nn->s2s_cp_lock);
774 	idr_preload_end();
775 	if (new_id < 0)
776 		return 0;
777 	return 1;
778 }
779 
780 int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
781 {
782 	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
783 }
784 
785 struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
786 						     struct nfs4_stid *p_stid)
787 {
788 	struct nfs4_cpntf_state *cps;
789 
790 	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
791 	if (!cps)
792 		return NULL;
793 	cps->cpntf_time = ktime_get_boottime_seconds();
794 	refcount_set(&cps->cp_stateid.sc_count, 1);
795 	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
796 		goto out_free;
797 	spin_lock(&nn->s2s_cp_lock);
798 	list_add(&cps->cp_list, &p_stid->sc_cp_list);
799 	spin_unlock(&nn->s2s_cp_lock);
800 	return cps;
801 out_free:
802 	kfree(cps);
803 	return NULL;
804 }
805 
806 void nfs4_free_copy_state(struct nfsd4_copy *copy)
807 {
808 	struct nfsd_net *nn;
809 
810 	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
811 	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
812 	spin_lock(&nn->s2s_cp_lock);
813 	idr_remove(&nn->s2s_cp_stateids,
814 		   copy->cp_stateid.stid.si_opaque.so_id);
815 	spin_unlock(&nn->s2s_cp_lock);
816 }
817 
818 static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
819 {
820 	struct nfs4_cpntf_state *cps;
821 	struct nfsd_net *nn;
822 
823 	nn = net_generic(net, nfsd_net_id);
824 	spin_lock(&nn->s2s_cp_lock);
825 	while (!list_empty(&stid->sc_cp_list)) {
826 		cps = list_first_entry(&stid->sc_cp_list,
827 				       struct nfs4_cpntf_state, cp_list);
828 		_free_cpntf_state_locked(nn, cps);
829 	}
830 	spin_unlock(&nn->s2s_cp_lock);
831 }
832 
833 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
834 {
835 	struct nfs4_stid *stid;
836 
837 	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
838 	if (!stid)
839 		return NULL;
840 
841 	return openlockstateid(stid);
842 }
843 
844 static void nfs4_free_deleg(struct nfs4_stid *stid)
845 {
846 	kmem_cache_free(deleg_slab, stid);
847 	atomic_long_dec(&num_delegations);
848 }
849 
850 /*
851  * When we recall a delegation, we should be careful not to hand it
852  * out again straight away.
853  * To ensure this we keep a pair of bloom filters ('new' and 'old')
854  * in which the filehandles of recalled delegations are "stored".
855  * If a filehandle appears in either filter, a delegation is blocked.
856  * When a delegation is recalled, the filehandle is stored in the "new"
857  * filter.
858  * Every 30 seconds we swap the filters and clear the "new" one,
859  * unless both are empty of course.
860  *
861  * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
862  * low 3 bytes as bit indices into the filter.
863  *
864  * 'blocked_delegations_lock', which is always taken in block_delegations(),
865  * is used to manage concurrent access.  Testing does not need the lock
866  * except when swapping the two filters.
867  */
868 static DEFINE_SPINLOCK(blocked_delegations_lock);
869 static struct bloom_pair {
870 	int	entries, old_entries;
871 	time64_t swap_time;
872 	int	new; /* index into 'set' */
873 	DECLARE_BITMAP(set[2], 256);
874 } blocked_delegations;
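/*
 * Worked example (hypothetical hash value): if jhash() of a recalled
 * filehandle returns 0x00c2a17f, block_delegations() sets bits 0x7f,
 * 0xa1 and 0xc2 in the "new" filter:
 *
 *	__set_bit(0x00c2a17f & 255, bd->set[bd->new]);		// 0x7f
 *	__set_bit((0x00c2a17f >> 8) & 255, bd->set[bd->new]);	// 0xa1
 *	__set_bit((0x00c2a17f >> 16) & 255, bd->set[bd->new]);	// 0xc2
 *
 * and delegation_blocked() refuses new delegations on that file while
 * all three bits remain set in either filter.
 */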
875 
876 static int delegation_blocked(struct knfsd_fh *fh)
877 {
878 	u32 hash;
879 	struct bloom_pair *bd = &blocked_delegations;
880 
881 	if (bd->entries == 0)
882 		return 0;
883 	if (ktime_get_seconds() - bd->swap_time > 30) {
884 		spin_lock(&blocked_delegations_lock);
885 		if (ktime_get_seconds() - bd->swap_time > 30) {
886 			bd->entries -= bd->old_entries;
887 			bd->old_entries = bd->entries;
888 			memset(bd->set[bd->new], 0,
889 			       sizeof(bd->set[0]));
890 			bd->new = 1-bd->new;
891 			bd->swap_time = ktime_get_seconds();
892 		}
893 		spin_unlock(&blocked_delegations_lock);
894 	}
895 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
896 	if (test_bit(hash&255, bd->set[0]) &&
897 	    test_bit((hash>>8)&255, bd->set[0]) &&
898 	    test_bit((hash>>16)&255, bd->set[0]))
899 		return 1;
900 
901 	if (test_bit(hash&255, bd->set[1]) &&
902 	    test_bit((hash>>8)&255, bd->set[1]) &&
903 	    test_bit((hash>>16)&255, bd->set[1]))
904 		return 1;
905 
906 	return 0;
907 }
908 
909 static void block_delegations(struct knfsd_fh *fh)
910 {
911 	u32 hash;
912 	struct bloom_pair *bd = &blocked_delegations;
913 
914 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
915 
916 	spin_lock(&blocked_delegations_lock);
917 	__set_bit(hash&255, bd->set[bd->new]);
918 	__set_bit((hash>>8)&255, bd->set[bd->new]);
919 	__set_bit((hash>>16)&255, bd->set[bd->new]);
920 	if (bd->entries == 0)
921 		bd->swap_time = ktime_get_seconds();
922 	bd->entries += 1;
923 	spin_unlock(&blocked_delegations_lock);
924 }
925 
926 static struct nfs4_delegation *
927 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
928 		 struct svc_fh *current_fh,
929 		 struct nfs4_clnt_odstate *odstate)
930 {
931 	struct nfs4_delegation *dp;
932 	long n;
933 
934 	dprintk("NFSD alloc_init_deleg\n");
935 	n = atomic_long_inc_return(&num_delegations);
936 	if (n < 0 || n > max_delegations)
937 		goto out_dec;
938 	if (delegation_blocked(&current_fh->fh_handle))
939 		goto out_dec;
940 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
941 	if (dp == NULL)
942 		goto out_dec;
943 
944 	/*
945 	 * delegation seqids are never incremented.  The special meaning
946 	 * that 4.1 gives to seqid 0 isn't relevant here, but let's avoid
947 	 * 0 anyway just for consistency and use 1:
948 	 */
949 	dp->dl_stid.sc_stateid.si_generation = 1;
950 	INIT_LIST_HEAD(&dp->dl_perfile);
951 	INIT_LIST_HEAD(&dp->dl_perclnt);
952 	INIT_LIST_HEAD(&dp->dl_recall_lru);
953 	dp->dl_clnt_odstate = odstate;
954 	get_clnt_odstate(odstate);
955 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
956 	dp->dl_retries = 1;
957 	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
958 		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
959 	get_nfs4_file(fp);
960 	dp->dl_stid.sc_file = fp;
961 	return dp;
962 out_dec:
963 	atomic_long_dec(&num_delegations);
964 	return NULL;
965 }
966 
967 void
968 nfs4_put_stid(struct nfs4_stid *s)
969 {
970 	struct nfs4_file *fp = s->sc_file;
971 	struct nfs4_client *clp = s->sc_client;
972 
973 	might_lock(&clp->cl_lock);
974 
975 	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
976 		wake_up_all(&close_wq);
977 		return;
978 	}
979 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
980 	nfs4_free_cpntf_statelist(clp->net, s);
981 	spin_unlock(&clp->cl_lock);
982 	s->sc_free(s);
983 	if (fp)
984 		put_nfs4_file(fp);
985 }
986 
987 void
988 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
989 {
990 	stateid_t *src = &stid->sc_stateid;
991 
992 	spin_lock(&stid->sc_lock);
993 	if (unlikely(++src->si_generation == 0))
994 		src->si_generation = 1;
995 	memcpy(dst, src, sizeof(*dst));
996 	spin_unlock(&stid->sc_lock);
997 }
998 
999 static void put_deleg_file(struct nfs4_file *fp)
1000 {
1001 	struct nfsd_file *nf = NULL;
1002 
1003 	spin_lock(&fp->fi_lock);
1004 	if (--fp->fi_delegees == 0)
1005 		swap(nf, fp->fi_deleg_file);
1006 	spin_unlock(&fp->fi_lock);
1007 
1008 	if (nf)
1009 		nfsd_file_put(nf);
1010 }
1011 
1012 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1013 {
1014 	struct nfs4_file *fp = dp->dl_stid.sc_file;
1015 	struct nfsd_file *nf = fp->fi_deleg_file;
1016 
1017 	WARN_ON_ONCE(!fp->fi_delegees);
1018 
1019 	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1020 	put_deleg_file(fp);
1021 }
1022 
1023 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1024 {
1025 	put_clnt_odstate(dp->dl_clnt_odstate);
1026 	nfs4_unlock_deleg_lease(dp);
1027 	nfs4_put_stid(&dp->dl_stid);
1028 }
1029 
1030 void nfs4_unhash_stid(struct nfs4_stid *s)
1031 {
1032 	s->sc_type = 0;
1033 }
1034 
1035 /**
1036  * nfs4_delegation_exists - Discover if this delegation already exists
1037  * @clp:     a pointer to the nfs4_client we're granting a delegation to
1038  * @fp:      a pointer to the nfs4_file we're granting a delegation on
1039  *
1040  * Return:
1041  *      true iff an existing delegation is found, otherwise false
1042  */
1043 
1044 static bool
1045 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1046 {
1047 	struct nfs4_delegation *searchdp = NULL;
1048 	struct nfs4_client *searchclp = NULL;
1049 
1050 	lockdep_assert_held(&state_lock);
1051 	lockdep_assert_held(&fp->fi_lock);
1052 
1053 	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1054 		searchclp = searchdp->dl_stid.sc_client;
1055 		if (clp == searchclp) {
1056 			return true;
1057 		}
1058 	}
1059 	return false;
1060 }
1061 
1062 /**
1063  * hash_delegation_locked - Add a delegation to the appropriate lists
1064  * @dp:     a pointer to the nfs4_delegation we are adding.
1065  * @fp:     a pointer to the nfs4_file we're granting a delegation on
1066  *
1067  * Return:
1068  *      On success: 0 if the delegation was successfully hashed.
1069  *
1070  *      On error: -EAGAIN if one was previously granted to this
1071  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
1072  *
1073  */
1074 
1075 static int
1076 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1077 {
1078 	struct nfs4_client *clp = dp->dl_stid.sc_client;
1079 
1080 	lockdep_assert_held(&state_lock);
1081 	lockdep_assert_held(&fp->fi_lock);
1082 
1083 	if (nfs4_delegation_exists(clp, fp))
1084 		return -EAGAIN;
1085 	refcount_inc(&dp->dl_stid.sc_count);
1086 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
1087 	list_add(&dp->dl_perfile, &fp->fi_delegations);
1088 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
1089 	return 0;
1090 }
1091 
1092 static bool
1093 unhash_delegation_locked(struct nfs4_delegation *dp)
1094 {
1095 	struct nfs4_file *fp = dp->dl_stid.sc_file;
1096 
1097 	lockdep_assert_held(&state_lock);
1098 
1099 	if (list_empty(&dp->dl_perfile))
1100 		return false;
1101 
1102 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1103 	/* Ensure that deleg break won't try to requeue it */
1104 	++dp->dl_time;
1105 	spin_lock(&fp->fi_lock);
1106 	list_del_init(&dp->dl_perclnt);
1107 	list_del_init(&dp->dl_recall_lru);
1108 	list_del_init(&dp->dl_perfile);
1109 	spin_unlock(&fp->fi_lock);
1110 	return true;
1111 }
1112 
1113 static void destroy_delegation(struct nfs4_delegation *dp)
1114 {
1115 	bool unhashed;
1116 
1117 	spin_lock(&state_lock);
1118 	unhashed = unhash_delegation_locked(dp);
1119 	spin_unlock(&state_lock);
1120 	if (unhashed)
1121 		destroy_unhashed_deleg(dp);
1122 }
1123 
1124 static void revoke_delegation(struct nfs4_delegation *dp)
1125 {
1126 	struct nfs4_client *clp = dp->dl_stid.sc_client;
1127 
1128 	WARN_ON(!list_empty(&dp->dl_recall_lru));
1129 
1130 	if (clp->cl_minorversion) {
1131 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1132 		refcount_inc(&dp->dl_stid.sc_count);
1133 		spin_lock(&clp->cl_lock);
1134 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1135 		spin_unlock(&clp->cl_lock);
1136 	}
1137 	destroy_unhashed_deleg(dp);
1138 }
1139 
1140 /*
1141  * SETCLIENTID state
1142  */
1143 
1144 static unsigned int clientid_hashval(u32 id)
1145 {
1146 	return id & CLIENT_HASH_MASK;
1147 }
1148 
1149 static unsigned int clientstr_hashval(struct xdr_netobj name)
1150 {
1151 	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1152 }
1153 
1154 /*
1155  * We store the NONE, READ, WRITE, and BOTH bits separately in the
1156  * st_{access,deny}_bmap field of the stateid, in order to track not
1157  * only what share bits are currently in force, but also what
1158  * combinations of share bits previous opens have used.  This allows us
1159  * to enforce the recommendation of rfc 3530 14.2.19 that the server
1160  * return an error if the client attempts to downgrade to a combination
1161  * of share bits not explicable by closing some of its previous opens.
1162  *
1163  * XXX: This enforcement is actually incomplete, since we don't keep
1164  * track of access/deny bit combinations; so, e.g., we allow:
1165  *
1166  *	OPEN allow read, deny write
1167  *	OPEN allow both, deny none
1168  *	DOWNGRADE allow read, deny none
1169  *
1170  * which we should reject.
1171  */
1172 static unsigned int
1173 bmap_to_share_mode(unsigned long bmap) {
1174 	int i;
1175 	unsigned int access = 0;
1176 
1177 	for (i = 1; i < 4; i++) {
1178 		if (test_bit(i, &bmap))
1179 			access |= i;
1180 	}
1181 	return access;
1182 }
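/*
 * For example, a stateid whose st_access_bmap has bit 1 (READ) and
 * bit 2 (WRITE) set maps to 1 | 2 == NFS4_SHARE_ACCESS_BOTH; a bmap
 * with only bit 3 (BOTH) set yields the same value.
 */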
1183 
1184 /* set share access for a given stateid */
1185 static inline void
1186 set_access(u32 access, struct nfs4_ol_stateid *stp)
1187 {
1188 	unsigned char mask = 1 << access;
1189 
1190 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1191 	stp->st_access_bmap |= mask;
1192 }
1193 
1194 /* clear share access for a given stateid */
1195 static inline void
1196 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1197 {
1198 	unsigned char mask = 1 << access;
1199 
1200 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1201 	stp->st_access_bmap &= ~mask;
1202 }
1203 
1204 /* test whether a given stateid has access */
1205 static inline bool
1206 test_access(u32 access, struct nfs4_ol_stateid *stp)
1207 {
1208 	unsigned char mask = 1 << access;
1209 
1210 	return (bool)(stp->st_access_bmap & mask);
1211 }
1212 
1213 /* set share deny for a given stateid */
1214 static inline void
1215 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1216 {
1217 	unsigned char mask = 1 << deny;
1218 
1219 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1220 	stp->st_deny_bmap |= mask;
1221 }
1222 
1223 /* clear share deny for a given stateid */
1224 static inline void
1225 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1226 {
1227 	unsigned char mask = 1 << deny;
1228 
1229 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1230 	stp->st_deny_bmap &= ~mask;
1231 }
1232 
1233 /* test whether a given stateid is denying specific access */
1234 static inline bool
1235 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1236 {
1237 	unsigned char mask = 1 << deny;
1238 
1239 	return (bool)(stp->st_deny_bmap & mask);
1240 }
1241 
1242 static int nfs4_access_to_omode(u32 access)
1243 {
1244 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
1245 	case NFS4_SHARE_ACCESS_READ:
1246 		return O_RDONLY;
1247 	case NFS4_SHARE_ACCESS_WRITE:
1248 		return O_WRONLY;
1249 	case NFS4_SHARE_ACCESS_BOTH:
1250 		return O_RDWR;
1251 	}
1252 	WARN_ON_ONCE(1);
1253 	return O_RDONLY;
1254 }
1255 
1256 /*
1257  * A stateid that had a deny mode associated with it is being released
1258  * or downgraded. Recalculate the deny mode on the file.
1259  */
1260 static void
1261 recalculate_deny_mode(struct nfs4_file *fp)
1262 {
1263 	struct nfs4_ol_stateid *stp;
1264 
1265 	spin_lock(&fp->fi_lock);
1266 	fp->fi_share_deny = 0;
1267 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1268 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1269 	spin_unlock(&fp->fi_lock);
1270 }
1271 
1272 static void
1273 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1274 {
1275 	int i;
1276 	bool change = false;
1277 
1278 	for (i = 1; i < 4; i++) {
1279 		if ((i & deny) != i) {
1280 			change = true;
1281 			clear_deny(i, stp);
1282 		}
1283 	}
1284 
1285 	/* Recalculate per-file deny mode if there was a change */
1286 	if (change)
1287 		recalculate_deny_mode(stp->st_stid.sc_file);
1288 }
1289 
1290 /* release all access and file references for a given stateid */
1291 static void
1292 release_all_access(struct nfs4_ol_stateid *stp)
1293 {
1294 	int i;
1295 	struct nfs4_file *fp = stp->st_stid.sc_file;
1296 
1297 	if (fp && stp->st_deny_bmap != 0)
1298 		recalculate_deny_mode(fp);
1299 
1300 	for (i = 1; i < 4; i++) {
1301 		if (test_access(i, stp))
1302 			nfs4_file_put_access(stp->st_stid.sc_file, i);
1303 		clear_access(i, stp);
1304 	}
1305 }
1306 
1307 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1308 {
1309 	kfree(sop->so_owner.data);
1310 	sop->so_ops->so_free(sop);
1311 }
1312 
1313 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1314 {
1315 	struct nfs4_client *clp = sop->so_client;
1316 
1317 	might_lock(&clp->cl_lock);
1318 
1319 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1320 		return;
1321 	sop->so_ops->so_unhash(sop);
1322 	spin_unlock(&clp->cl_lock);
1323 	nfs4_free_stateowner(sop);
1324 }
1325 
1326 static bool
1327 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1328 {
1329 	return list_empty(&stp->st_perfile);
1330 }
1331 
1332 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1333 {
1334 	struct nfs4_file *fp = stp->st_stid.sc_file;
1335 
1336 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1337 
1338 	if (list_empty(&stp->st_perfile))
1339 		return false;
1340 
1341 	spin_lock(&fp->fi_lock);
1342 	list_del_init(&stp->st_perfile);
1343 	spin_unlock(&fp->fi_lock);
1344 	list_del(&stp->st_perstateowner);
1345 	return true;
1346 }
1347 
1348 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1349 {
1350 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1351 
1352 	put_clnt_odstate(stp->st_clnt_odstate);
1353 	release_all_access(stp);
1354 	if (stp->st_stateowner)
1355 		nfs4_put_stateowner(stp->st_stateowner);
1356 	kmem_cache_free(stateid_slab, stid);
1357 }
1358 
1359 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1360 {
1361 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1362 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1363 	struct nfsd_file *nf;
1364 
1365 	nf = find_any_file(stp->st_stid.sc_file);
1366 	if (nf) {
1367 		get_file(nf->nf_file);
1368 		filp_close(nf->nf_file, (fl_owner_t)lo);
1369 		nfsd_file_put(nf);
1370 	}
1371 	nfs4_free_ol_stateid(stid);
1372 }
1373 
1374 /*
1375  * Put the persistent reference to an already unhashed generic stateid, while
1376  * holding the cl_lock. If it's the last reference, then put it onto the
1377  * reaplist for later destruction.
1378  */
1379 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1380 				       struct list_head *reaplist)
1381 {
1382 	struct nfs4_stid *s = &stp->st_stid;
1383 	struct nfs4_client *clp = s->sc_client;
1384 
1385 	lockdep_assert_held(&clp->cl_lock);
1386 
1387 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1388 
1389 	if (!refcount_dec_and_test(&s->sc_count)) {
1390 		wake_up_all(&close_wq);
1391 		return;
1392 	}
1393 
1394 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1395 	list_add(&stp->st_locks, reaplist);
1396 }
1397 
1398 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1399 {
1400 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1401 
1402 	if (!unhash_ol_stateid(stp))
1403 		return false;
1404 	list_del_init(&stp->st_locks);
1405 	nfs4_unhash_stid(&stp->st_stid);
1406 	return true;
1407 }
1408 
1409 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1410 {
1411 	struct nfs4_client *clp = stp->st_stid.sc_client;
1412 	bool unhashed;
1413 
1414 	spin_lock(&clp->cl_lock);
1415 	unhashed = unhash_lock_stateid(stp);
1416 	spin_unlock(&clp->cl_lock);
1417 	if (unhashed)
1418 		nfs4_put_stid(&stp->st_stid);
1419 }
1420 
1421 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1422 {
1423 	struct nfs4_client *clp = lo->lo_owner.so_client;
1424 
1425 	lockdep_assert_held(&clp->cl_lock);
1426 
1427 	list_del_init(&lo->lo_owner.so_strhash);
1428 }
1429 
1430 /*
1431  * Free a list of generic stateids that were collected earlier after being
1432  * fully unhashed.
1433  */
1434 static void
1435 free_ol_stateid_reaplist(struct list_head *reaplist)
1436 {
1437 	struct nfs4_ol_stateid *stp;
1438 	struct nfs4_file *fp;
1439 
1440 	might_sleep();
1441 
1442 	while (!list_empty(reaplist)) {
1443 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1444 				       st_locks);
1445 		list_del(&stp->st_locks);
1446 		fp = stp->st_stid.sc_file;
1447 		stp->st_stid.sc_free(&stp->st_stid);
1448 		if (fp)
1449 			put_nfs4_file(fp);
1450 	}
1451 }
1452 
1453 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1454 				       struct list_head *reaplist)
1455 {
1456 	struct nfs4_ol_stateid *stp;
1457 
1458 	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1459 
1460 	while (!list_empty(&open_stp->st_locks)) {
1461 		stp = list_entry(open_stp->st_locks.next,
1462 				struct nfs4_ol_stateid, st_locks);
1463 		WARN_ON(!unhash_lock_stateid(stp));
1464 		put_ol_stateid_locked(stp, reaplist);
1465 	}
1466 }
1467 
1468 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1469 				struct list_head *reaplist)
1470 {
1471 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1472 
1473 	if (!unhash_ol_stateid(stp))
1474 		return false;
1475 	release_open_stateid_locks(stp, reaplist);
1476 	return true;
1477 }
1478 
1479 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1480 {
1481 	LIST_HEAD(reaplist);
1482 
1483 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1484 	if (unhash_open_stateid(stp, &reaplist))
1485 		put_ol_stateid_locked(stp, &reaplist);
1486 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1487 	free_ol_stateid_reaplist(&reaplist);
1488 }
1489 
1490 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1491 {
1492 	struct nfs4_client *clp = oo->oo_owner.so_client;
1493 
1494 	lockdep_assert_held(&clp->cl_lock);
1495 
1496 	list_del_init(&oo->oo_owner.so_strhash);
1497 	list_del_init(&oo->oo_perclient);
1498 }
1499 
1500 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1501 {
1502 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1503 					  nfsd_net_id);
1504 	struct nfs4_ol_stateid *s;
1505 
1506 	spin_lock(&nn->client_lock);
1507 	s = oo->oo_last_closed_stid;
1508 	if (s) {
1509 		list_del_init(&oo->oo_close_lru);
1510 		oo->oo_last_closed_stid = NULL;
1511 	}
1512 	spin_unlock(&nn->client_lock);
1513 	if (s)
1514 		nfs4_put_stid(&s->st_stid);
1515 }
1516 
1517 static void release_openowner(struct nfs4_openowner *oo)
1518 {
1519 	struct nfs4_ol_stateid *stp;
1520 	struct nfs4_client *clp = oo->oo_owner.so_client;
1521 	struct list_head reaplist;
1522 
1523 	INIT_LIST_HEAD(&reaplist);
1524 
1525 	spin_lock(&clp->cl_lock);
1526 	unhash_openowner_locked(oo);
1527 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1528 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1529 				struct nfs4_ol_stateid, st_perstateowner);
1530 		if (unhash_open_stateid(stp, &reaplist))
1531 			put_ol_stateid_locked(stp, &reaplist);
1532 	}
1533 	spin_unlock(&clp->cl_lock);
1534 	free_ol_stateid_reaplist(&reaplist);
1535 	release_last_closed_stateid(oo);
1536 	nfs4_put_stateowner(&oo->oo_owner);
1537 }
1538 
1539 static inline int
1540 hash_sessionid(struct nfs4_sessionid *sessionid)
1541 {
1542 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1543 
1544 	return sid->sequence % SESSION_HASH_SIZE;
1545 }
1546 
1547 #ifdef CONFIG_SUNRPC_DEBUG
1548 static inline void
1549 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1550 {
1551 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1552 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1553 }
1554 #else
1555 static inline void
1556 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1557 {
1558 }
1559 #endif
1560 
1561 /*
1562  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1563  * won't be used for replay.
1564  */
1565 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1566 {
1567 	struct nfs4_stateowner *so = cstate->replay_owner;
1568 
1569 	if (nfserr == nfserr_replay_me)
1570 		return;
1571 
1572 	if (!seqid_mutating_err(ntohl(nfserr))) {
1573 		nfsd4_cstate_clear_replay(cstate);
1574 		return;
1575 	}
1576 	if (!so)
1577 		return;
1578 	if (so->so_is_open_owner)
1579 		release_last_closed_stateid(openowner(so));
1580 	so->so_seqid++;
1581 	return;
1582 }
1583 
1584 static void
1585 gen_sessionid(struct nfsd4_session *ses)
1586 {
1587 	struct nfs4_client *clp = ses->se_client;
1588 	struct nfsd4_sessionid *sid;
1589 
1590 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1591 	sid->clientid = clp->cl_clientid;
1592 	sid->sequence = current_sessionid++;
1593 	sid->reserved = 0;
1594 }
1595 
1596 /*
1597  * The protocol defines ca_maxresponsesize_cached to include the size of
1598  * the rpc header, but all we need to cache is the data starting after
1599  * the end of the initial SEQUENCE operation--the rest we regenerate
1600  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1601  * value that is the number of bytes in our cache plus a few additional
1602  * bytes.  In order to stay on the safe side, and not promise more than
1603  * we can cache, those additional bytes must be the minimum possible: 24
1604  * bytes of rpc header (xid through accept state, with AUTH_NULL
1605  * verifier), 12 for the compound header (with zero-length tag), and 44
1606  * for the SEQUENCE op response:
1607  */
1608 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1609 
1610 static void
1611 free_session_slots(struct nfsd4_session *ses)
1612 {
1613 	int i;
1614 
1615 	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1616 		free_svc_cred(&ses->se_slots[i]->sl_cred);
1617 		kfree(ses->se_slots[i]);
1618 	}
1619 }
1620 
1621 /*
1622  * We don't actually need to cache the rpc and session headers, so we
1623  * can allocate a little less for each slot:
1624  */
1625 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1626 {
1627 	u32 size;
1628 
1629 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1630 		size = 0;
1631 	else
1632 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1633 	return size + sizeof(struct nfsd4_slot);
1634 }
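/*
 * Worked example (assuming a client-requested maxresp_cached of 1024
 * bytes): NFSD_MIN_HDR_SEQ_SZ is 24 + 12 + 44 = 80, so slot_bytes()
 * returns 944 bytes of cached-reply space plus sizeof(struct nfsd4_slot)
 * per slot.
 */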
1635 
1636 /*
1637  * XXX: If we run out of reserved DRC memory we could (up to a point)
1638  * re-negotiate active sessions and reduce their slot usage to make
1639  * room for new connections. For now we just fail the create session.
1640  */
1641 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1642 {
1643 	u32 slotsize = slot_bytes(ca);
1644 	u32 num = ca->maxreqs;
1645 	unsigned long avail, total_avail;
1646 	unsigned int scale_factor;
1647 
1648 	spin_lock(&nfsd_drc_lock);
1649 	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1650 		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1651 	else
1652 		/* We have handed out more space than we chose in
1653 		 * set_max_drc() to allow.  That isn't really a
1654 		 * problem as long as that doesn't make us think we
1655 		 * have lots more due to integer overflow.
1656 		 */
1657 		total_avail = 0;
1658 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1659 	/*
1660 	 * Never use more than a fraction of the remaining memory,
1661 	 * unless it's the only way to give this client a slot.
1662 	 * The chosen fraction is either 1/8 or 1/number of threads,
1663 	 * whichever is smaller.  This ensures there are adequate
1664 	 * slots to support multiple clients per thread.
1665 	 * Give the client one slot even if that would require
1666 	 * over-allocation--it is better than failure.
1667 	 */
1668 	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1669 
1670 	avail = clamp_t(unsigned long, avail, slotsize,
1671 			total_avail/scale_factor);
1672 	num = min_t(int, num, avail / slotsize);
1673 	num = max_t(int, num, 1);
1674 	nfsd_drc_mem_used += num * slotsize;
1675 	spin_unlock(&nfsd_drc_lock);
1676 
1677 	return num;
1678 }
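/*
 * Illustration with hypothetical numbers: with 8 or fewer nfsd threads,
 * scale_factor is 8, so a new session is normally limited to 1/8 of the
 * remaining DRC memory; the clamp above only goes below that share to
 * guarantee at least one slotsize worth of cache, and num is never
 * reduced below a single slot.
 */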
1679 
1680 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1681 {
1682 	int slotsize = slot_bytes(ca);
1683 
1684 	spin_lock(&nfsd_drc_lock);
1685 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1686 	spin_unlock(&nfsd_drc_lock);
1687 }
1688 
1689 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1690 					   struct nfsd4_channel_attrs *battrs)
1691 {
1692 	int numslots = fattrs->maxreqs;
1693 	int slotsize = slot_bytes(fattrs);
1694 	struct nfsd4_session *new;
1695 	int mem, i;
1696 
1697 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1698 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1699 	mem = numslots * sizeof(struct nfsd4_slot *);
1700 
1701 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1702 	if (!new)
1703 		return NULL;
1704 	/* allocate each struct nfsd4_slot and data cache in one piece */
1705 	for (i = 0; i < numslots; i++) {
1706 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1707 		if (!new->se_slots[i])
1708 			goto out_free;
1709 	}
1710 
1711 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1712 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1713 
1714 	return new;
1715 out_free:
1716 	while (i--)
1717 		kfree(new->se_slots[i]);
1718 	kfree(new);
1719 	return NULL;
1720 }
1721 
1722 static void free_conn(struct nfsd4_conn *c)
1723 {
1724 	svc_xprt_put(c->cn_xprt);
1725 	kfree(c);
1726 }
1727 
1728 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1729 {
1730 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1731 	struct nfs4_client *clp = c->cn_session->se_client;
1732 
1733 	spin_lock(&clp->cl_lock);
1734 	if (!list_empty(&c->cn_persession)) {
1735 		list_del(&c->cn_persession);
1736 		free_conn(c);
1737 	}
1738 	nfsd4_probe_callback(clp);
1739 	spin_unlock(&clp->cl_lock);
1740 }
1741 
1742 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1743 {
1744 	struct nfsd4_conn *conn;
1745 
1746 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1747 	if (!conn)
1748 		return NULL;
1749 	svc_xprt_get(rqstp->rq_xprt);
1750 	conn->cn_xprt = rqstp->rq_xprt;
1751 	conn->cn_flags = flags;
1752 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1753 	return conn;
1754 }
1755 
1756 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1757 {
1758 	conn->cn_session = ses;
1759 	list_add(&conn->cn_persession, &ses->se_conns);
1760 }
1761 
1762 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1763 {
1764 	struct nfs4_client *clp = ses->se_client;
1765 
1766 	spin_lock(&clp->cl_lock);
1767 	__nfsd4_hash_conn(conn, ses);
1768 	spin_unlock(&clp->cl_lock);
1769 }
1770 
1771 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1772 {
1773 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1774 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1775 }
1776 
1777 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1778 {
1779 	int ret;
1780 
1781 	nfsd4_hash_conn(conn, ses);
1782 	ret = nfsd4_register_conn(conn);
1783 	if (ret)
1784 		/* oops; xprt is already down: */
1785 		nfsd4_conn_lost(&conn->cn_xpt_user);
1786 	/* We may have gained or lost a callback channel: */
1787 	nfsd4_probe_callback_sync(ses->se_client);
1788 }
1789 
1790 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1791 {
1792 	u32 dir = NFS4_CDFC4_FORE;
1793 
1794 	if (cses->flags & SESSION4_BACK_CHAN)
1795 		dir |= NFS4_CDFC4_BACK;
1796 	return alloc_conn(rqstp, dir);
1797 }
1798 
1799 /* must be called under client_lock */
1800 static void nfsd4_del_conns(struct nfsd4_session *s)
1801 {
1802 	struct nfs4_client *clp = s->se_client;
1803 	struct nfsd4_conn *c;
1804 
1805 	spin_lock(&clp->cl_lock);
1806 	while (!list_empty(&s->se_conns)) {
1807 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1808 		list_del_init(&c->cn_persession);
1809 		spin_unlock(&clp->cl_lock);
1810 
1811 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1812 		free_conn(c);
1813 
1814 		spin_lock(&clp->cl_lock);
1815 	}
1816 	spin_unlock(&clp->cl_lock);
1817 }
1818 
1819 static void __free_session(struct nfsd4_session *ses)
1820 {
1821 	free_session_slots(ses);
1822 	kfree(ses);
1823 }
1824 
1825 static void free_session(struct nfsd4_session *ses)
1826 {
1827 	nfsd4_del_conns(ses);
1828 	nfsd4_put_drc_mem(&ses->se_fchannel);
1829 	__free_session(ses);
1830 }
1831 
1832 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1833 {
1834 	int idx;
1835 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1836 
1837 	new->se_client = clp;
1838 	gen_sessionid(new);
1839 
1840 	INIT_LIST_HEAD(&new->se_conns);
1841 
1842 	new->se_cb_seq_nr = 1;
1843 	new->se_flags = cses->flags;
1844 	new->se_cb_prog = cses->callback_prog;
1845 	new->se_cb_sec = cses->cb_sec;
1846 	atomic_set(&new->se_ref, 0);
1847 	idx = hash_sessionid(&new->se_sessionid);
1848 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1849 	spin_lock(&clp->cl_lock);
1850 	list_add(&new->se_perclnt, &clp->cl_sessions);
1851 	spin_unlock(&clp->cl_lock);
1852 
1853 	{
1854 		struct sockaddr *sa = svc_addr(rqstp);
1855 		/*
1856 		 * This is a little silly; with sessions there's no real
1857 		 * use for the callback address.  Use the peer address
1858 		 * as a reasonable default for now, but consider fixing
1859 		 * the rpc client not to require an address in the
1860 		 * future:
1861 		 */
1862 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1863 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1864 	}
1865 }
1866 
1867 /* caller must hold client_lock */
1868 static struct nfsd4_session *
1869 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1870 {
1871 	struct nfsd4_session *elem;
1872 	int idx;
1873 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1874 
1875 	lockdep_assert_held(&nn->client_lock);
1876 
1877 	dump_sessionid(__func__, sessionid);
1878 	idx = hash_sessionid(sessionid);
1879 	/* Search in the appropriate list */
1880 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1881 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1882 			    NFS4_MAX_SESSIONID_LEN)) {
1883 			return elem;
1884 		}
1885 	}
1886 
1887 	dprintk("%s: session not found\n", __func__);
1888 	return NULL;
1889 }
1890 
1891 static struct nfsd4_session *
1892 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1893 		__be32 *ret)
1894 {
1895 	struct nfsd4_session *session;
1896 	__be32 status = nfserr_badsession;
1897 
1898 	session = __find_in_sessionid_hashtbl(sessionid, net);
1899 	if (!session)
1900 		goto out;
1901 	status = nfsd4_get_session_locked(session);
1902 	if (status)
1903 		session = NULL;
1904 out:
1905 	*ret = status;
1906 	return session;
1907 }
1908 
1909 /* caller must hold client_lock */
1910 static void
1911 unhash_session(struct nfsd4_session *ses)
1912 {
1913 	struct nfs4_client *clp = ses->se_client;
1914 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1915 
1916 	lockdep_assert_held(&nn->client_lock);
1917 
1918 	list_del(&ses->se_hash);
1919 	spin_lock(&ses->se_client->cl_lock);
1920 	list_del(&ses->se_perclnt);
1921 	spin_unlock(&ses->se_client->cl_lock);
1922 }
1923 
1924 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1925 static int
1926 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1927 {
1928 	/*
1929 	 * We're assuming the clid was not given out from a boot
1930 	 * precisely 2^32 (about 136 years) before this one.  That seems
1931 	 * a safe assumption:
1932 	 */
1933 	if (clid->cl_boot == (u32)nn->boot_time)
1934 		return 0;
1935 	trace_nfsd_clid_stale(clid);
1936 	return 1;
1937 }
1938 
1939 /*
1940  * XXX Should we use a slab cache?
1941  * This type of memory management is somewhat inefficient, but we use it
1942  * anyway since SETCLIENTID is not a common operation.
1943  */
1944 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1945 {
1946 	struct nfs4_client *clp;
1947 	int i;
1948 
1949 	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1950 	if (clp == NULL)
1951 		return NULL;
1952 	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1953 	if (clp->cl_name.data == NULL)
1954 		goto err_no_name;
1955 	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1956 						 sizeof(struct list_head),
1957 						 GFP_KERNEL);
1958 	if (!clp->cl_ownerstr_hashtbl)
1959 		goto err_no_hashtbl;
1960 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1961 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1962 	INIT_LIST_HEAD(&clp->cl_sessions);
1963 	idr_init(&clp->cl_stateids);
1964 	atomic_set(&clp->cl_rpc_users, 0);
1965 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1966 	INIT_LIST_HEAD(&clp->cl_idhash);
1967 	INIT_LIST_HEAD(&clp->cl_openowners);
1968 	INIT_LIST_HEAD(&clp->cl_delegations);
1969 	INIT_LIST_HEAD(&clp->cl_lru);
1970 	INIT_LIST_HEAD(&clp->cl_revoked);
1971 #ifdef CONFIG_NFSD_PNFS
1972 	INIT_LIST_HEAD(&clp->cl_lo_states);
1973 #endif
1974 	INIT_LIST_HEAD(&clp->async_copies);
1975 	spin_lock_init(&clp->async_lock);
1976 	spin_lock_init(&clp->cl_lock);
1977 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1978 	return clp;
1979 err_no_hashtbl:
1980 	kfree(clp->cl_name.data);
1981 err_no_name:
1982 	kmem_cache_free(client_slab, clp);
1983 	return NULL;
1984 }
1985 
1986 static void __free_client(struct kref *k)
1987 {
1988 	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
1989 	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
1990 
1991 	free_svc_cred(&clp->cl_cred);
1992 	kfree(clp->cl_ownerstr_hashtbl);
1993 	kfree(clp->cl_name.data);
1994 	kfree(clp->cl_nii_domain.data);
1995 	kfree(clp->cl_nii_name.data);
1996 	idr_destroy(&clp->cl_stateids);
1997 	kmem_cache_free(client_slab, clp);
1998 }
1999 
2000 static void drop_client(struct nfs4_client *clp)
2001 {
2002 	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2003 }
2004 
2005 static void
2006 free_client(struct nfs4_client *clp)
2007 {
2008 	while (!list_empty(&clp->cl_sessions)) {
2009 		struct nfsd4_session *ses;
2010 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2011 				se_perclnt);
2012 		list_del(&ses->se_perclnt);
2013 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
2014 		free_session(ses);
2015 	}
2016 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2017 	if (clp->cl_nfsd_dentry) {
2018 		nfsd_client_rmdir(clp->cl_nfsd_dentry);
2019 		clp->cl_nfsd_dentry = NULL;
2020 		wake_up_all(&expiry_wq);
2021 	}
2022 	drop_client(clp);
2023 }
2024 
2025 /* must be called under the client_lock */
2026 static void
2027 unhash_client_locked(struct nfs4_client *clp)
2028 {
2029 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2030 	struct nfsd4_session *ses;
2031 
2032 	lockdep_assert_held(&nn->client_lock);
2033 
2034 	/* Mark the client as expired! */
2035 	clp->cl_time = 0;
2036 	/* Make it invisible */
2037 	if (!list_empty(&clp->cl_idhash)) {
2038 		list_del_init(&clp->cl_idhash);
2039 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2040 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2041 		else
2042 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2043 	}
2044 	list_del_init(&clp->cl_lru);
2045 	spin_lock(&clp->cl_lock);
2046 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2047 		list_del_init(&ses->se_hash);
2048 	spin_unlock(&clp->cl_lock);
2049 }
2050 
2051 static void
2052 unhash_client(struct nfs4_client *clp)
2053 {
2054 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2055 
2056 	spin_lock(&nn->client_lock);
2057 	unhash_client_locked(clp);
2058 	spin_unlock(&nn->client_lock);
2059 }
2060 
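/* Refuse to expire a client that still has RPCs in flight. */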
2061 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2062 {
2063 	if (atomic_read(&clp->cl_rpc_users))
2064 		return nfserr_jukebox;
2065 	unhash_client_locked(clp);
2066 	return nfs_ok;
2067 }
2068 
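/*
 * Release all state held by an already-unhashed client: delegations,
 * open and lock owners, layouts, async copies and the callback channel.
 */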
2069 static void
2070 __destroy_client(struct nfs4_client *clp)
2071 {
2072 	int i;
2073 	struct nfs4_openowner *oo;
2074 	struct nfs4_delegation *dp;
2075 	struct list_head reaplist;
2076 
2077 	INIT_LIST_HEAD(&reaplist);
2078 	spin_lock(&state_lock);
2079 	while (!list_empty(&clp->cl_delegations)) {
2080 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2081 		WARN_ON(!unhash_delegation_locked(dp));
2082 		list_add(&dp->dl_recall_lru, &reaplist);
2083 	}
2084 	spin_unlock(&state_lock);
2085 	while (!list_empty(&reaplist)) {
2086 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2087 		list_del_init(&dp->dl_recall_lru);
2088 		destroy_unhashed_deleg(dp);
2089 	}
2090 	while (!list_empty(&clp->cl_revoked)) {
2091 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2092 		list_del_init(&dp->dl_recall_lru);
2093 		nfs4_put_stid(&dp->dl_stid);
2094 	}
2095 	while (!list_empty(&clp->cl_openowners)) {
2096 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2097 		nfs4_get_stateowner(&oo->oo_owner);
2098 		release_openowner(oo);
2099 	}
2100 	for (i = 0; i < OWNER_HASH_SIZE; i++) {
2101 		struct nfs4_stateowner *so, *tmp;
2102 
2103 		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2104 					 so_strhash) {
2105 			/* Should be no openowners at this point */
2106 			WARN_ON_ONCE(so->so_is_open_owner);
2107 			remove_blocked_locks(lockowner(so));
2108 		}
2109 	}
2110 	nfsd4_return_all_client_layouts(clp);
2111 	nfsd4_shutdown_copy(clp);
2112 	nfsd4_shutdown_callback(clp);
2113 	if (clp->cl_cb_conn.cb_xprt)
2114 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2115 	free_client(clp);
2116 	wake_up_all(&expiry_wq);
2117 }
2118 
2119 static void
2120 destroy_client(struct nfs4_client *clp)
2121 {
2122 	unhash_client(clp);
2123 	__destroy_client(clp);
2124 }
2125 
2126 static void inc_reclaim_complete(struct nfs4_client *clp)
2127 {
2128 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2129 
2130 	if (!nn->track_reclaim_completes)
2131 		return;
2132 	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2133 		return;
2134 	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2135 			nn->reclaim_str_hashtbl_size) {
2136 		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2137 				clp->net->ns.inum);
2138 		nfsd4_end_grace(nn);
2139 	}
2140 }
2141 
2142 static void expire_client(struct nfs4_client *clp)
2143 {
2144 	unhash_client(clp);
2145 	nfsd4_client_record_remove(clp);
2146 	__destroy_client(clp);
2147 }
2148 
2149 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2150 {
2151 	memcpy(target->cl_verifier.data, source->data,
2152 			sizeof(target->cl_verifier.data));
2153 }
2154 
2155 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2156 {
2157 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2158 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2159 }
2160 
2161 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2162 {
2163 	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2164 	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2165 								GFP_KERNEL);
2166 	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2167 	if ((source->cr_principal && !target->cr_principal) ||
2168 	    (source->cr_raw_principal && !target->cr_raw_principal) ||
2169 	    (source->cr_targ_princ && !target->cr_targ_princ))
2170 		return -ENOMEM;
2171 
2172 	target->cr_flavor = source->cr_flavor;
2173 	target->cr_uid = source->cr_uid;
2174 	target->cr_gid = source->cr_gid;
2175 	target->cr_group_info = source->cr_group_info;
2176 	get_group_info(target->cr_group_info);
2177 	target->cr_gss_mech = source->cr_gss_mech;
2178 	if (source->cr_gss_mech)
2179 		gss_mech_get(source->cr_gss_mech);
2180 	return 0;
2181 }
2182 
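/*
 * Order two netobjs by length, then by contents; used to keep the
 * client-name rbtrees sorted.
 */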
2183 static int
2184 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2185 {
2186 	if (o1->len < o2->len)
2187 		return -1;
2188 	if (o1->len > o2->len)
2189 		return 1;
2190 	return memcmp(o1->data, o2->data, o1->len);
2191 }
2192 
2193 static int
2194 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2195 {
2196 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2197 }
2198 
2199 static int
2200 same_clid(clientid_t *cl1, clientid_t *cl2)
2201 {
2202 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2203 }
2204 
2205 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2206 {
2207 	int i;
2208 
2209 	if (g1->ngroups != g2->ngroups)
2210 		return false;
2211 	for (i=0; i<g1->ngroups; i++)
2212 		if (!gid_eq(g1->gid[i], g2->gid[i]))
2213 			return false;
2214 	return true;
2215 }
2216 
2217 /*
2218  * RFC 3530 language requires clid_inuse be returned when the
2219  * "principal" associated with a request differs from that previously
2220  * used.  We use uid, gid's, and gss principal string as our best
2221  * approximation.  We also don't want to allow non-gss use of a client
2222  * established using gss: in theory cr_principal should catch that
2223  * change, but in practice cr_principal can be null even in the gss case
2224  * since gssd doesn't always pass down a principal string.
2225  */
2226 static bool is_gss_cred(struct svc_cred *cr)
2227 {
2228 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
2229 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2230 }
2231 
2232 
2233 static bool
2234 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2235 {
2236 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2237 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2238 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2239 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2240 		return false;
2241 	/* XXX: check that cr_targ_princ fields match? */
2242 	if (cr1->cr_principal == cr2->cr_principal)
2243 		return true;
2244 	if (!cr1->cr_principal || !cr2->cr_principal)
2245 		return false;
2246 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2247 }
2248 
2249 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2250 {
2251 	struct svc_cred *cr = &rqstp->rq_cred;
2252 	u32 service;
2253 
2254 	if (!cr->cr_gss_mech)
2255 		return false;
2256 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2257 	return service == RPC_GSS_SVC_INTEGRITY ||
2258 	       service == RPC_GSS_SVC_PRIVACY;
2259 }
2260 
2261 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2262 {
2263 	struct svc_cred *cr = &rqstp->rq_cred;
2264 
2265 	if (!cl->cl_mach_cred)
2266 		return true;
2267 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2268 		return false;
2269 	if (!svc_rqst_integrity_protected(rqstp))
2270 		return false;
2271 	if (cl->cl_cred.cr_raw_principal)
2272 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2273 						cr->cr_raw_principal);
2274 	if (!cr->cr_principal)
2275 		return false;
2276 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2277 }
2278 
2279 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2280 {
2281 	__be32 verf[2];
2282 
2283 	/*
2284 	 * This is opaque to the client, so there is no need to byte-swap.
2285 	 * Use __force to keep sparse happy.
2286 	 */
2287 	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2288 	verf[1] = (__force __be32)nn->clverifier_counter++;
2289 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2290 }
2291 
2292 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2293 {
2294 	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2295 	clp->cl_clientid.cl_id = nn->clientid_counter++;
2296 	gen_confirm(clp, nn);
2297 }
2298 
2299 static struct nfs4_stid *
2300 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2301 {
2302 	struct nfs4_stid *ret;
2303 
2304 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2305 	if (!ret || !ret->sc_type)
2306 		return NULL;
2307 	return ret;
2308 }
2309 
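/* Return a referenced stateid only if its type is in the given mask. */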
2310 static struct nfs4_stid *
2311 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2312 {
2313 	struct nfs4_stid *s;
2314 
2315 	spin_lock(&cl->cl_lock);
2316 	s = find_stateid_locked(cl, t);
2317 	if (s != NULL) {
2318 		if (typemask & s->sc_type)
2319 			refcount_inc(&s->sc_count);
2320 		else
2321 			s = NULL;
2322 	}
2323 	spin_unlock(&cl->cl_lock);
2324 	return s;
2325 }
2326 
2327 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2328 {
2329 	struct nfsdfs_client *nc;
2330 	nc = get_nfsdfs_client(inode);
2331 	if (!nc)
2332 		return NULL;
2333 	return container_of(nc, struct nfs4_client, cl_nfsdfs);
2334 }
2335 
2336 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2337 {
2338 	seq_printf(m, "\"");
2339 	seq_escape_mem_ascii(m, data, len);
2340 	seq_printf(m, "\"");
2341 }
2342 
2343 static int client_info_show(struct seq_file *m, void *v)
2344 {
2345 	struct inode *inode = m->private;
2346 	struct nfs4_client *clp;
2347 	u64 clid;
2348 
2349 	clp = get_nfsdfs_clp(inode);
2350 	if (!clp)
2351 		return -ENXIO;
2352 	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2353 	seq_printf(m, "clientid: 0x%llx\n", clid);
2354 	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2355 	seq_printf(m, "name: ");
2356 	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2357 	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2358 	if (clp->cl_nii_domain.data) {
2359 		seq_printf(m, "Implementation domain: ");
2360 		seq_quote_mem(m, clp->cl_nii_domain.data,
2361 					clp->cl_nii_domain.len);
2362 		seq_printf(m, "\nImplementation name: ");
2363 		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2364 		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2365 			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2366 	}
2367 	drop_client(clp);
2368 
2369 	return 0;
2370 }
2371 
2372 static int client_info_open(struct inode *inode, struct file *file)
2373 {
2374 	return single_open(file, client_info_show, inode);
2375 }
2376 
2377 static const struct file_operations client_info_fops = {
2378 	.open		= client_info_open,
2379 	.read		= seq_read,
2380 	.llseek		= seq_lseek,
2381 	.release	= single_release,
2382 };
2383 
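/*
 * seq_file iterators for the per-client "states" file: walk the
 * client's stateid idr with cl_lock held across the whole traversal.
 */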
2384 static void *states_start(struct seq_file *s, loff_t *pos)
2385 	__acquires(&clp->cl_lock)
2386 {
2387 	struct nfs4_client *clp = s->private;
2388 	unsigned long id = *pos;
2389 	void *ret;
2390 
2391 	spin_lock(&clp->cl_lock);
2392 	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2393 	*pos = id;
2394 	return ret;
2395 }
2396 
2397 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2398 {
2399 	struct nfs4_client *clp = s->private;
2400 	unsigned long id;
2401 	void *ret;
2402 
2403 	id = *pos;
2404 	id++;
2405 	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2406 	*pos = id;
2407 	return ret;
2408 }
2409 
2410 static void states_stop(struct seq_file *s, void *v)
2411 	__releases(&clp->cl_lock)
2412 {
2413 	struct nfs4_client *clp = s->private;
2414 
2415 	spin_unlock(&clp->cl_lock);
2416 }
2417 
2418 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2419 {
2420 	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2421 }
2422 
2423 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2424 {
2425 	struct inode *inode = f->nf_inode;
2426 
2427 	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2428 					MAJOR(inode->i_sb->s_dev),
2429 					MINOR(inode->i_sb->s_dev),
2430 					inode->i_ino);
2431 }
2432 
2433 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2434 {
2435 	seq_printf(s, "owner: ");
2436 	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2437 }
2438 
2439 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2440 {
2441 	seq_printf(s, "0x%.8x", stid->si_generation);
2442 	seq_printf(s, "%12phN", &stid->si_opaque);
2443 }
2444 
2445 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2446 {
2447 	struct nfs4_ol_stateid *ols;
2448 	struct nfs4_file *nf;
2449 	struct nfsd_file *file;
2450 	struct nfs4_stateowner *oo;
2451 	unsigned int access, deny;
2452 
2453 	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2454 		return 0; /* XXX: or SEQ_SKIP? */
2455 	ols = openlockstateid(st);
2456 	oo = ols->st_stateowner;
2457 	nf = st->sc_file;
2458 	file = find_any_file(nf);
2459 	if (!file)
2460 		return 0;
2461 
2462 	seq_printf(s, "- ");
2463 	nfs4_show_stateid(s, &st->sc_stateid);
2464 	seq_printf(s, ": { type: open, ");
2465 
2466 	access = bmap_to_share_mode(ols->st_access_bmap);
2467 	deny   = bmap_to_share_mode(ols->st_deny_bmap);
2468 
2469 	seq_printf(s, "access: %s%s, ",
2470 		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2471 		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2472 	seq_printf(s, "deny: %s%s, ",
2473 		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2474 		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2475 
2476 	nfs4_show_superblock(s, file);
2477 	seq_printf(s, ", ");
2478 	nfs4_show_fname(s, file);
2479 	seq_printf(s, ", ");
2480 	nfs4_show_owner(s, oo);
2481 	seq_printf(s, " }\n");
2482 	nfsd_file_put(file);
2483 
2484 	return 0;
2485 }
2486 
2487 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2488 {
2489 	struct nfs4_ol_stateid *ols;
2490 	struct nfs4_file *nf;
2491 	struct nfsd_file *file;
2492 	struct nfs4_stateowner *oo;
2493 
2494 	ols = openlockstateid(st);
2495 	oo = ols->st_stateowner;
2496 	nf = st->sc_file;
2497 	file = find_any_file(nf);
2498 	if (!file)
2499 		return 0;
2500 
2501 	seq_printf(s, "- ");
2502 	nfs4_show_stateid(s, &st->sc_stateid);
2503 	seq_printf(s, ": { type: lock, ");
2504 
2505 	/*
2506 	 * Note: a lock stateid isn't really the same thing as a lock,
2507 	 * it's the locking state held by one owner on a file, and there
2508 	 * may be multiple (or no) lock ranges associated with it.
2509 	 * (The same is true of open stateids.)
2510 	 */
2511 
2512 	nfs4_show_superblock(s, file);
2513 	/* XXX: open stateid? */
2514 	seq_printf(s, ", ");
2515 	nfs4_show_fname(s, file);
2516 	seq_printf(s, ", ");
2517 	nfs4_show_owner(s, oo);
2518 	seq_printf(s, " }\n");
2519 	nfsd_file_put(file);
2520 
2521 	return 0;
2522 }
2523 
2524 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2525 {
2526 	struct nfs4_delegation *ds;
2527 	struct nfs4_file *nf;
2528 	struct nfsd_file *file;
2529 
2530 	ds = delegstateid(st);
2531 	nf = st->sc_file;
2532 	file = find_deleg_file(nf);
2533 	if (!file)
2534 		return 0;
2535 
2536 	seq_printf(s, "- ");
2537 	nfs4_show_stateid(s, &st->sc_stateid);
2538 	seq_printf(s, ": { type: deleg, ");
2539 
2540 	/* Kinda dead code as long as we only support read delegs: */
2541 	seq_printf(s, "access: %s, ",
2542 		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2543 
2544 	/* XXX: lease time, whether it's being recalled. */
2545 
2546 	nfs4_show_superblock(s, file);
2547 	seq_printf(s, ", ");
2548 	nfs4_show_fname(s, file);
2549 	seq_printf(s, " }\n");
2550 	nfsd_file_put(file);
2551 
2552 	return 0;
2553 }
2554 
2555 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2556 {
2557 	struct nfs4_layout_stateid *ls;
2558 	struct nfsd_file *file;
2559 
2560 	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2561 	file = ls->ls_file;
2562 
2563 	seq_printf(s, "- ");
2564 	nfs4_show_stateid(s, &st->sc_stateid);
2565 	seq_printf(s, ": { type: layout, ");
2566 
2567 	/* XXX: What else would be useful? */
2568 
2569 	nfs4_show_superblock(s, file);
2570 	seq_printf(s, ", ");
2571 	nfs4_show_fname(s, file);
2572 	seq_printf(s, " }\n");
2573 
2574 	return 0;
2575 }
2576 
2577 static int states_show(struct seq_file *s, void *v)
2578 {
2579 	struct nfs4_stid *st = v;
2580 
2581 	switch (st->sc_type) {
2582 	case NFS4_OPEN_STID:
2583 		return nfs4_show_open(s, st);
2584 	case NFS4_LOCK_STID:
2585 		return nfs4_show_lock(s, st);
2586 	case NFS4_DELEG_STID:
2587 		return nfs4_show_deleg(s, st);
2588 	case NFS4_LAYOUT_STID:
2589 		return nfs4_show_layout(s, st);
2590 	default:
2591 		return 0; /* XXX: or SEQ_SKIP? */
2592 	}
2593 	/* XXX: copy stateids? */
2594 }
2595 
2596 static struct seq_operations states_seq_ops = {
2597 	.start = states_start,
2598 	.next = states_next,
2599 	.stop = states_stop,
2600 	.show = states_show
2601 };
2602 
2603 static int client_states_open(struct inode *inode, struct file *file)
2604 {
2605 	struct seq_file *s;
2606 	struct nfs4_client *clp;
2607 	int ret;
2608 
2609 	clp = get_nfsdfs_clp(inode);
2610 	if (!clp)
2611 		return -ENXIO;
2612 
2613 	ret = seq_open(file, &states_seq_ops);
2614 	if (ret)
2615 		return ret;
2616 	s = file->private_data;
2617 	s->private = clp;
2618 	return 0;
2619 }
2620 
2621 static int client_opens_release(struct inode *inode, struct file *file)
2622 {
2623 	struct seq_file *m = file->private_data;
2624 	struct nfs4_client *clp = m->private;
2625 
2626 	/* XXX: alternatively, we could get/drop in seq start/stop */
2627 	drop_client(clp);
2628 	return 0;
2629 }
2630 
2631 static const struct file_operations client_states_fops = {
2632 	.open		= client_states_open,
2633 	.read		= seq_read,
2634 	.llseek		= seq_lseek,
2635 	.release	= client_opens_release,
2636 };
2637 
2638 /*
2639  * Normally we refuse to destroy clients that are in use, but here the
2640  * administrator is telling us to just do it.  We also want to wait
2641  * so the caller has a guarantee that the client's locks are gone by
2642  * the time the write returns:
2643  */
2644 static void force_expire_client(struct nfs4_client *clp)
2645 {
2646 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2647 	bool already_expired;
2648 
2649 	spin_lock(&clp->cl_lock);
2650 	clp->cl_time = 0;
2651 	spin_unlock(&clp->cl_lock);
2652 
2653 	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2654 	spin_lock(&nn->client_lock);
2655 	already_expired = list_empty(&clp->cl_lru);
2656 	if (!already_expired)
2657 		unhash_client_locked(clp);
2658 	spin_unlock(&nn->client_lock);
2659 
2660 	if (!already_expired)
2661 		expire_client(clp);
2662 	else
2663 		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2664 }
2665 
2666 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2667 				   size_t size, loff_t *pos)
2668 {
2669 	char *data;
2670 	struct nfs4_client *clp;
2671 
2672 	data = simple_transaction_get(file, buf, size);
2673 	if (IS_ERR(data))
2674 		return PTR_ERR(data);
2675 	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2676 		return -EINVAL;
2677 	clp = get_nfsdfs_clp(file_inode(file));
2678 	if (!clp)
2679 		return -ENXIO;
2680 	force_expire_client(clp);
2681 	drop_client(clp);
2682 	return 7;
2683 }
2684 
2685 static const struct file_operations client_ctl_fops = {
2686 	.write		= client_ctl_write,
2687 	.release	= simple_transaction_release,
2688 };
2689 
2690 static const struct tree_descr client_files[] = {
2691 	[0] = {"info", &client_info_fops, S_IRUSR},
2692 	[1] = {"states", &client_states_fops, S_IRUSR},
2693 	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
2694 	[3] = {""},
2695 };
2696 
2697 static struct nfs4_client *create_client(struct xdr_netobj name,
2698 		struct svc_rqst *rqstp, nfs4_verifier *verf)
2699 {
2700 	struct nfs4_client *clp;
2701 	struct sockaddr *sa = svc_addr(rqstp);
2702 	int ret;
2703 	struct net *net = SVC_NET(rqstp);
2704 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2705 
2706 	clp = alloc_client(name);
2707 	if (clp == NULL)
2708 		return NULL;
2709 
2710 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2711 	if (ret) {
2712 		free_client(clp);
2713 		return NULL;
2714 	}
2715 	gen_clid(clp, nn);
2716 	kref_init(&clp->cl_nfsdfs.cl_ref);
2717 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2718 	clp->cl_time = ktime_get_boottime_seconds();
2719 	clear_bit(0, &clp->cl_cb_slot_busy);
2720 	copy_verf(clp, verf);
2721 	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2722 	clp->cl_cb_session = NULL;
2723 	clp->net = net;
2724 	clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
2725 			clp->cl_clientid.cl_id - nn->clientid_base,
2726 			client_files);
2727 	if (!clp->cl_nfsd_dentry) {
2728 		free_client(clp);
2729 		return NULL;
2730 	}
2731 	return clp;
2732 }
2733 
2734 static void
2735 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2736 {
2737 	struct rb_node **new = &(root->rb_node), *parent = NULL;
2738 	struct nfs4_client *clp;
2739 
2740 	while (*new) {
2741 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2742 		parent = *new;
2743 
2744 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2745 			new = &((*new)->rb_left);
2746 		else
2747 			new = &((*new)->rb_right);
2748 	}
2749 
2750 	rb_link_node(&new_clp->cl_namenode, parent, new);
2751 	rb_insert_color(&new_clp->cl_namenode, root);
2752 }
2753 
2754 static struct nfs4_client *
2755 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2756 {
2757 	int cmp;
2758 	struct rb_node *node = root->rb_node;
2759 	struct nfs4_client *clp;
2760 
2761 	while (node) {
2762 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2763 		cmp = compare_blob(&clp->cl_name, name);
2764 		if (cmp > 0)
2765 			node = node->rb_left;
2766 		else if (cmp < 0)
2767 			node = node->rb_right;
2768 		else
2769 			return clp;
2770 	}
2771 	return NULL;
2772 }
2773 
2774 static void
2775 add_to_unconfirmed(struct nfs4_client *clp)
2776 {
2777 	unsigned int idhashval;
2778 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2779 
2780 	lockdep_assert_held(&nn->client_lock);
2781 
2782 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2783 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2784 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2785 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2786 	renew_client_locked(clp);
2787 }
2788 
2789 static void
2790 move_to_confirmed(struct nfs4_client *clp)
2791 {
2792 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2793 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2794 
2795 	lockdep_assert_held(&nn->client_lock);
2796 
2797 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2798 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2799 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2800 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
2801 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2802 	renew_client_locked(clp);
2803 }
2804 
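/*
 * Look up a client by clientid in the given hash table.  The client's
 * minor version must match what "sessions" asks for (4.1+ vs. 4.0);
 * a successful lookup also renews the client.
 */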
2805 static struct nfs4_client *
2806 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2807 {
2808 	struct nfs4_client *clp;
2809 	unsigned int idhashval = clientid_hashval(clid->cl_id);
2810 
2811 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2812 		if (same_clid(&clp->cl_clientid, clid)) {
2813 			if ((bool)clp->cl_minorversion != sessions)
2814 				return NULL;
2815 			renew_client_locked(clp);
2816 			return clp;
2817 		}
2818 	}
2819 	return NULL;
2820 }
2821 
2822 static struct nfs4_client *
2823 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2824 {
2825 	struct list_head *tbl = nn->conf_id_hashtbl;
2826 
2827 	lockdep_assert_held(&nn->client_lock);
2828 	return find_client_in_id_table(tbl, clid, sessions);
2829 }
2830 
2831 static struct nfs4_client *
2832 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2833 {
2834 	struct list_head *tbl = nn->unconf_id_hashtbl;
2835 
2836 	lockdep_assert_held(&nn->client_lock);
2837 	return find_client_in_id_table(tbl, clid, sessions);
2838 }
2839 
2840 static bool clp_used_exchangeid(struct nfs4_client *clp)
2841 {
2842 	return clp->cl_exchange_flags != 0;
2843 }
2844 
2845 static struct nfs4_client *
2846 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2847 {
2848 	lockdep_assert_held(&nn->client_lock);
2849 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
2850 }
2851 
2852 static struct nfs4_client *
2853 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2854 {
2855 	lockdep_assert_held(&nn->client_lock);
2856 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2857 }
2858 
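/*
 * Record the callback address given by SETCLIENTID in cl_cb_conn; if
 * it can't be parsed, clear the address so no delegations are granted.
 */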
2859 static void
2860 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2861 {
2862 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2863 	struct sockaddr	*sa = svc_addr(rqstp);
2864 	u32 scopeid = rpc_get_scope_id(sa);
2865 	unsigned short expected_family;
2866 
2867 	/* Currently, we only support tcp and tcp6 for the callback channel */
2868 	if (se->se_callback_netid_len == 3 &&
2869 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2870 		expected_family = AF_INET;
2871 	else if (se->se_callback_netid_len == 4 &&
2872 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2873 		expected_family = AF_INET6;
2874 	else
2875 		goto out_err;
2876 
2877 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2878 					    se->se_callback_addr_len,
2879 					    (struct sockaddr *)&conn->cb_addr,
2880 					    sizeof(conn->cb_addr));
2881 
2882 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2883 		goto out_err;
2884 
2885 	if (conn->cb_addr.ss_family == AF_INET6)
2886 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2887 
2888 	conn->cb_prog = se->se_callback_prog;
2889 	conn->cb_ident = se->se_callback_ident;
2890 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2891 	trace_nfsd_cb_args(clp, conn);
2892 	return;
2893 out_err:
2894 	conn->cb_addr.ss_family = AF_UNSPEC;
2895 	conn->cb_addrlen = 0;
2896 	trace_nfsd_cb_nodelegs(clp);
2897 	return;
2898 }
2899 
2900 /*
2901  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2902  */
2903 static void
2904 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2905 {
2906 	struct xdr_buf *buf = resp->xdr.buf;
2907 	struct nfsd4_slot *slot = resp->cstate.slot;
2908 	unsigned int base;
2909 
2910 	dprintk("--> %s slot %p\n", __func__, slot);
2911 
2912 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2913 	slot->sl_opcnt = resp->opcnt;
2914 	slot->sl_status = resp->cstate.status;
2915 	free_svc_cred(&slot->sl_cred);
2916 	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2917 
2918 	if (!nfsd4_cache_this(resp)) {
2919 		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2920 		return;
2921 	}
2922 	slot->sl_flags |= NFSD4_SLOT_CACHED;
2923 
2924 	base = resp->cstate.data_offset;
2925 	slot->sl_datalen = buf->len - base;
2926 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2927 		WARN(1, "%s: sessions DRC could not cache compound\n",
2928 		     __func__);
2929 	return;
2930 }
2931 
2932 /*
2933  * Encode the replay sequence operation from the slot values.
2934  * If cachethis is FALSE, encode the uncached rep error on the next
2935  * operation, which sets resp->p and increments resp->opcnt for
2936  * nfs4svc_encode_compoundres.
2937  *
2938  */
2939 static __be32
2940 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2941 			  struct nfsd4_compoundres *resp)
2942 {
2943 	struct nfsd4_op *op;
2944 	struct nfsd4_slot *slot = resp->cstate.slot;
2945 
2946 	/* Encode the replayed sequence operation */
2947 	op = &args->ops[resp->opcnt - 1];
2948 	nfsd4_encode_operation(resp, op);
2949 
2950 	if (slot->sl_flags & NFSD4_SLOT_CACHED)
2951 		return op->status;
2952 	if (args->opcnt == 1) {
2953 		/*
2954 		 * The original operation wasn't a solo sequence--we
2955 		 * always cache those--so this retry must not match the
2956 		 * original:
2957 		 */
2958 		op->status = nfserr_seq_false_retry;
2959 	} else {
2960 		op = &args->ops[resp->opcnt++];
2961 		op->status = nfserr_retry_uncached_rep;
2962 		nfsd4_encode_operation(resp, op);
2963 	}
2964 	return op->status;
2965 }
2966 
2967 /*
2968  * The sequence operation is not cached because we can use the slot and
2969  * session values.
2970  */
2971 static __be32
2972 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2973 			 struct nfsd4_sequence *seq)
2974 {
2975 	struct nfsd4_slot *slot = resp->cstate.slot;
2976 	struct xdr_stream *xdr = &resp->xdr;
2977 	__be32 *p;
2978 	__be32 status;
2979 
2980 	dprintk("--> %s slot %p\n", __func__, slot);
2981 
2982 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2983 	if (status)
2984 		return status;
2985 
2986 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2987 	if (!p) {
2988 		WARN_ON_ONCE(1);
2989 		return nfserr_serverfault;
2990 	}
2991 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2992 	xdr_commit_encode(xdr);
2993 
2994 	resp->opcnt = slot->sl_opcnt;
2995 	return slot->sl_status;
2996 }
2997 
2998 /*
2999  * Set the exchange_id flags returned by the server.
3000  */
3001 static void
3002 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3003 {
3004 #ifdef CONFIG_NFSD_PNFS
3005 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3006 #else
3007 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3008 #endif
3009 
3010 	/* Referrals are supported, Migration is not. */
3011 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3012 
3013 	/* set the wire flags to return to client. */
3014 	clid->flags = new->cl_exchange_flags;
3015 }
3016 
3017 static bool client_has_openowners(struct nfs4_client *clp)
3018 {
3019 	struct nfs4_openowner *oo;
3020 
3021 	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3022 		if (!list_empty(&oo->oo_owner.so_stateids))
3023 			return true;
3024 	}
3025 	return false;
3026 }
3027 
3028 static bool client_has_state(struct nfs4_client *clp)
3029 {
3030 	return client_has_openowners(clp)
3031 #ifdef CONFIG_NFSD_PNFS
3032 		|| !list_empty(&clp->cl_lo_states)
3033 #endif
3034 		|| !list_empty(&clp->cl_delegations)
3035 		|| !list_empty(&clp->cl_sessions)
3036 		|| !list_empty(&clp->async_copies);
3037 }
3038 
3039 static __be32 copy_impl_id(struct nfs4_client *clp,
3040 				struct nfsd4_exchange_id *exid)
3041 {
3042 	if (!exid->nii_domain.data)
3043 		return 0;
3044 	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3045 	if (!clp->cl_nii_domain.data)
3046 		return nfserr_jukebox;
3047 	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3048 	if (!clp->cl_nii_name.data)
3049 		return nfserr_jukebox;
3050 	clp->cl_nii_time = exid->nii_time;
3051 	return 0;
3052 }
3053 
3054 __be32
3055 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3056 		union nfsd4_op_u *u)
3057 {
3058 	struct nfsd4_exchange_id *exid = &u->exchange_id;
3059 	struct nfs4_client *conf, *new;
3060 	struct nfs4_client *unconf = NULL;
3061 	__be32 status;
3062 	char			addr_str[INET6_ADDRSTRLEN];
3063 	nfs4_verifier		verf = exid->verifier;
3064 	struct sockaddr		*sa = svc_addr(rqstp);
3065 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3066 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3067 
3068 	rpc_ntop(sa, addr_str, sizeof(addr_str));
3069 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3070 		"ip_addr=%s flags %x, spa_how %u\n",
3071 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
3072 		addr_str, exid->flags, exid->spa_how);
3073 
3074 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3075 		return nfserr_inval;
3076 
3077 	new = create_client(exid->clname, rqstp, &verf);
3078 	if (new == NULL)
3079 		return nfserr_jukebox;
3080 	status = copy_impl_id(new, exid);
3081 	if (status)
3082 		goto out_nolock;
3083 
3084 	switch (exid->spa_how) {
3085 	case SP4_MACH_CRED:
3086 		exid->spo_must_enforce[0] = 0;
3087 		exid->spo_must_enforce[1] = (
3088 			1 << (OP_BIND_CONN_TO_SESSION - 32) |
3089 			1 << (OP_EXCHANGE_ID - 32) |
3090 			1 << (OP_CREATE_SESSION - 32) |
3091 			1 << (OP_DESTROY_SESSION - 32) |
3092 			1 << (OP_DESTROY_CLIENTID - 32));
3093 
3094 		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3095 					1 << (OP_OPEN_DOWNGRADE) |
3096 					1 << (OP_LOCKU) |
3097 					1 << (OP_DELEGRETURN));
3098 
3099 		exid->spo_must_allow[1] &= (
3100 					1 << (OP_TEST_STATEID - 32) |
3101 					1 << (OP_FREE_STATEID - 32));
3102 		if (!svc_rqst_integrity_protected(rqstp)) {
3103 			status = nfserr_inval;
3104 			goto out_nolock;
3105 		}
3106 		/*
3107 		 * Sometimes userspace doesn't give us a principal.
3108 		 * Which is a bug, really.  Anyway, we can't enforce
3109 		 * MACH_CRED in that case, better to give up now:
3110 		 */
3111 		if (!new->cl_cred.cr_principal &&
3112 					!new->cl_cred.cr_raw_principal) {
3113 			status = nfserr_serverfault;
3114 			goto out_nolock;
3115 		}
3116 		new->cl_mach_cred = true;
		break;
3117 	case SP4_NONE:
3118 		break;
3119 	default:				/* checked by xdr code */
3120 		WARN_ON_ONCE(1);
3121 		fallthrough;
3122 	case SP4_SSV:
3123 		status = nfserr_encr_alg_unsupp;
3124 		goto out_nolock;
3125 	}
3126 
3127 	/* Cases below refer to rfc 5661 section 18.35.4: */
3128 	spin_lock(&nn->client_lock);
3129 	conf = find_confirmed_client_by_name(&exid->clname, nn);
3130 	if (conf) {
3131 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3132 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3133 
3134 		if (update) {
3135 			if (!clp_used_exchangeid(conf)) { /* buggy client */
3136 				status = nfserr_inval;
3137 				goto out;
3138 			}
3139 			if (!nfsd4_mach_creds_match(conf, rqstp)) {
3140 				status = nfserr_wrong_cred;
3141 				goto out;
3142 			}
3143 			if (!creds_match) { /* case 9 */
3144 				status = nfserr_perm;
3145 				goto out;
3146 			}
3147 			if (!verfs_match) { /* case 8 */
3148 				status = nfserr_not_same;
3149 				goto out;
3150 			}
3151 			/* case 6 */
3152 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3153 			goto out_copy;
3154 		}
3155 		if (!creds_match) { /* case 3 */
3156 			if (client_has_state(conf)) {
3157 				status = nfserr_clid_inuse;
3158 				goto out;
3159 			}
3160 			goto out_new;
3161 		}
3162 		if (verfs_match) { /* case 2 */
3163 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3164 			goto out_copy;
3165 		}
3166 		/* case 5, client reboot */
3167 		conf = NULL;
3168 		goto out_new;
3169 	}
3170 
3171 	if (update) { /* case 7 */
3172 		status = nfserr_noent;
3173 		goto out;
3174 	}
3175 
3176 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
3177 	if (unconf) /* case 4, possible retry or client restart */
3178 		unhash_client_locked(unconf);
3179 
3180 	/* case 1 (normal case) */
3181 out_new:
3182 	if (conf) {
3183 		status = mark_client_expired_locked(conf);
3184 		if (status)
3185 			goto out;
3186 	}
3187 	new->cl_minorversion = cstate->minorversion;
3188 	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3189 	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3190 
3191 	add_to_unconfirmed(new);
3192 	swap(new, conf);
3193 out_copy:
3194 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3195 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
3196 
3197 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3198 	nfsd4_set_ex_flags(conf, exid);
3199 
3200 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3201 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3202 	status = nfs_ok;
3203 
3204 out:
3205 	spin_unlock(&nn->client_lock);
3206 out_nolock:
3207 	if (new)
3208 		expire_client(new);
3209 	if (unconf)
3210 		expire_client(unconf);
3211 	return status;
3212 }
3213 
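/*
 * Check a seqid against a slot: a retry of a request still being
 * processed gets jukebox, the next seqid in sequence is accepted, a
 * repeat of the last seqid is a replay, and anything else is
 * misordered.
 */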
3214 static __be32
3215 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3216 {
3217 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3218 		slot_seqid);
3219 
3220 	/* The slot is in use, and no response has been sent. */
3221 	if (slot_inuse) {
3222 		if (seqid == slot_seqid)
3223 			return nfserr_jukebox;
3224 		else
3225 			return nfserr_seq_misordered;
3226 	}
3227 	/* Note unsigned 32-bit arithmetic handles wraparound: */
3228 	if (likely(seqid == slot_seqid + 1))
3229 		return nfs_ok;
3230 	if (seqid == slot_seqid)
3231 		return nfserr_replay_cache;
3232 	return nfserr_seq_misordered;
3233 }
3234 
3235 /*
3236  * Cache the create session result into the create session single DRC
3237  * slot cache by saving the xdr structure. sl_seqid has been set.
3238  * Do this for solo or embedded create session operations.
3239  */
3240 static void
3241 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3242 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
3243 {
3244 	slot->sl_status = nfserr;
3245 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3246 }
3247 
3248 static __be32
3249 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3250 			    struct nfsd4_clid_slot *slot)
3251 {
3252 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3253 	return slot->sl_status;
3254 }
3255 
3256 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
3257 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3258 			1 +	/* MIN tag is length with zero, only length */ \
3259 			3 +	/* version, opcount, opcode */ \
3260 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3261 				/* seqid, slotID, slotID, cache */ \
3262 			4 ) * sizeof(__be32))
3263 
3264 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3265 			2 +	/* verifier: AUTH_NULL, length 0 */\
3266 			1 +	/* status */ \
3267 			1 +	/* MIN tag is length with zero, only length */ \
3268 			3 +	/* opcount, opcode, opstatus*/ \
3269 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3270 				/* seqid, slotID, slotID, slotID, status */ \
3271 			5 ) * sizeof(__be32))
3272 
3273 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3274 {
3275 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3276 
3277 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3278 		return nfserr_toosmall;
3279 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3280 		return nfserr_toosmall;
3281 	ca->headerpadsz = 0;
3282 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3283 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3284 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3285 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3286 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3287 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3288 	/*
3289 	 * Note decreasing slot size below client's request may make it
3290 	 * difficult for client to function correctly, whereas
3291 	 * decreasing the number of slots will (just?) affect
3292 	 * performance.  When short on memory we therefore prefer to
3293 	 * decrease number of slots instead of their size.  Clients that
3294 	 * request larger slots than they need will get poor results:
3295 	 * Note that we always allow at least one slot, because our
3296 	 * accounting is soft and provides no guarantees either way.
3297 	 */
3298 	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3299 
3300 	return nfs_ok;
3301 }
3302 
3303 /*
3304  * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3305  * These are based on similar macros in linux/sunrpc/msg_prot.h.
3306  */
3307 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3308 	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3309 
3310 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3311 	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3312 
3313 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
3314 				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3315 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
3316 				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3317 				 sizeof(__be32))
3318 
3319 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3320 {
3321 	ca->headerpadsz = 0;
3322 
3323 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3324 		return nfserr_toosmall;
3325 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3326 		return nfserr_toosmall;
3327 	ca->maxresp_cached = 0;
3328 	if (ca->maxops < 2)
3329 		return nfserr_toosmall;
3330 
3331 	return nfs_ok;
3332 }
3333 
3334 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3335 {
3336 	switch (cbs->flavor) {
3337 	case RPC_AUTH_NULL:
3338 	case RPC_AUTH_UNIX:
3339 		return nfs_ok;
3340 	default:
3341 		/*
3342 		 * GSS case: the spec doesn't allow us to return this
3343 		 * error.  But it also doesn't allow us not to support
3344 		 * GSS.
3345 		 * I'd rather this fail hard than return some error the
3346 		 * client might think it can already handle:
3347 		 */
3348 		return nfserr_encr_alg_unsupp;
3349 	}
3350 }
3351 
3352 __be32
3353 nfsd4_create_session(struct svc_rqst *rqstp,
3354 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3355 {
3356 	struct nfsd4_create_session *cr_ses = &u->create_session;
3357 	struct sockaddr *sa = svc_addr(rqstp);
3358 	struct nfs4_client *conf, *unconf;
3359 	struct nfs4_client *old = NULL;
3360 	struct nfsd4_session *new;
3361 	struct nfsd4_conn *conn;
3362 	struct nfsd4_clid_slot *cs_slot = NULL;
3363 	__be32 status = 0;
3364 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3365 
3366 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3367 		return nfserr_inval;
3368 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3369 	if (status)
3370 		return status;
3371 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3372 	if (status)
3373 		return status;
3374 	status = check_backchannel_attrs(&cr_ses->back_channel);
3375 	if (status)
3376 		goto out_release_drc_mem;
3377 	status = nfserr_jukebox;
3378 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3379 	if (!new)
3380 		goto out_release_drc_mem;
3381 	conn = alloc_conn_from_crses(rqstp, cr_ses);
3382 	if (!conn)
3383 		goto out_free_session;
3384 
3385 	spin_lock(&nn->client_lock);
3386 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3387 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3388 	WARN_ON_ONCE(conf && unconf);
3389 
3390 	if (conf) {
3391 		status = nfserr_wrong_cred;
3392 		if (!nfsd4_mach_creds_match(conf, rqstp))
3393 			goto out_free_conn;
3394 		cs_slot = &conf->cl_cs_slot;
3395 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3396 		if (status) {
3397 			if (status == nfserr_replay_cache)
3398 				status = nfsd4_replay_create_session(cr_ses, cs_slot);
3399 			goto out_free_conn;
3400 		}
3401 	} else if (unconf) {
3402 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3403 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3404 			status = nfserr_clid_inuse;
3405 			goto out_free_conn;
3406 		}
3407 		status = nfserr_wrong_cred;
3408 		if (!nfsd4_mach_creds_match(unconf, rqstp))
3409 			goto out_free_conn;
3410 		cs_slot = &unconf->cl_cs_slot;
3411 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3412 		if (status) {
3413 			/* an unconfirmed replay returns misordered */
3414 			status = nfserr_seq_misordered;
3415 			goto out_free_conn;
3416 		}
3417 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3418 		if (old) {
3419 			status = mark_client_expired_locked(old);
3420 			if (status) {
3421 				old = NULL;
3422 				goto out_free_conn;
3423 			}
3424 		}
3425 		move_to_confirmed(unconf);
3426 		conf = unconf;
3427 	} else {
3428 		status = nfserr_stale_clientid;
3429 		goto out_free_conn;
3430 	}
3431 	status = nfs_ok;
3432 	/* Persistent sessions are not supported */
3433 	cr_ses->flags &= ~SESSION4_PERSIST;
3434 	/* Upshifting from TCP to RDMA is not supported */
3435 	cr_ses->flags &= ~SESSION4_RDMA;
3436 
3437 	init_session(rqstp, new, conf, cr_ses);
3438 	nfsd4_get_session_locked(new);
3439 
3440 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3441 	       NFS4_MAX_SESSIONID_LEN);
3442 	cs_slot->sl_seqid++;
3443 	cr_ses->seqid = cs_slot->sl_seqid;
3444 
3445 	/* cache solo and embedded create sessions under the client_lock */
3446 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
3447 	spin_unlock(&nn->client_lock);
3448 	/* init connection and backchannel */
3449 	nfsd4_init_conn(rqstp, conn, new);
3450 	nfsd4_put_session(new);
3451 	if (old)
3452 		expire_client(old);
3453 	return status;
3454 out_free_conn:
3455 	spin_unlock(&nn->client_lock);
3456 	free_conn(conn);
3457 	if (old)
3458 		expire_client(old);
3459 out_free_session:
3460 	__free_session(new);
3461 out_release_drc_mem:
3462 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
3463 	return status;
3464 }
3465 
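/* Collapse the *_OR_BOTH directions to BOTH; reject unknown values. */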
3466 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3467 {
3468 	switch (*dir) {
3469 	case NFS4_CDFC4_FORE:
3470 	case NFS4_CDFC4_BACK:
3471 		return nfs_ok;
3472 	case NFS4_CDFC4_FORE_OR_BOTH:
3473 	case NFS4_CDFC4_BACK_OR_BOTH:
3474 		*dir = NFS4_CDFC4_BOTH;
3475 		return nfs_ok;
3476 	}
3477 	return nfserr_inval;
3478 }
3479 
3480 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3481 		struct nfsd4_compound_state *cstate,
3482 		union nfsd4_op_u *u)
3483 {
3484 	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3485 	struct nfsd4_session *session = cstate->session;
3486 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3487 	__be32 status;
3488 
3489 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3490 	if (status)
3491 		return status;
3492 	spin_lock(&nn->client_lock);
3493 	session->se_cb_prog = bc->bc_cb_program;
3494 	session->se_cb_sec = bc->bc_cb_sec;
3495 	spin_unlock(&nn->client_lock);
3496 
3497 	nfsd4_probe_callback(session->se_client);
3498 
3499 	return nfs_ok;
3500 }
3501 
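/* Caller must hold clp->cl_lock. */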
3502 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3503 {
3504 	struct nfsd4_conn *c;
3505 
3506 	list_for_each_entry(c, &s->se_conns, cn_persession) {
3507 		if (c->cn_xprt == xpt) {
3508 			return c;
3509 		}
3510 	}
3511 	return NULL;
3512 }
3513 
3514 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3515 				struct nfsd4_session *session, u32 req)
3516 {
3517 	struct nfs4_client *clp = session->se_client;
3518 	struct svc_xprt *xpt = rqst->rq_xprt;
3519 	struct nfsd4_conn *c;
3520 	__be32 status;
3521 
3522 	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
3523 	spin_lock(&clp->cl_lock);
3524 	c = __nfsd4_find_conn(xpt, session);
3525 	if (!c)
3526 		status = nfserr_noent;
3527 	else if (req == c->cn_flags)
3528 		status = nfs_ok;
3529 	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3530 				c->cn_flags != NFS4_CDFC4_BACK)
3531 		status = nfs_ok;
3532 	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3533 				c->cn_flags != NFS4_CDFC4_FORE)
3534 		status = nfs_ok;
3535 	else
3536 		status = nfserr_inval;
3537 	spin_unlock(&clp->cl_lock);
3538 	return status;
3539 }
3540 
3541 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3542 		     struct nfsd4_compound_state *cstate,
3543 		     union nfsd4_op_u *u)
3544 {
3545 	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3546 	__be32 status;
3547 	struct nfsd4_conn *conn;
3548 	struct nfsd4_session *session;
3549 	struct net *net = SVC_NET(rqstp);
3550 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3551 
3552 	if (!nfsd4_last_compound_op(rqstp))
3553 		return nfserr_not_only_op;
3554 	spin_lock(&nn->client_lock);
3555 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3556 	spin_unlock(&nn->client_lock);
3557 	if (!session)
3558 		goto out_no_session;
3559 	status = nfserr_wrong_cred;
3560 	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3561 		goto out;
3562 	status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
3563 	if (status == nfs_ok || status == nfserr_inval)
3564 		goto out;
3565 	status = nfsd4_map_bcts_dir(&bcts->dir);
3566 	if (status)
3567 		goto out;
3568 	conn = alloc_conn(rqstp, bcts->dir);
3569 	status = nfserr_jukebox;
3570 	if (!conn)
3571 		goto out;
3572 	nfsd4_init_conn(rqstp, conn, session);
3573 	status = nfs_ok;
3574 out:
3575 	nfsd4_put_session(session);
3576 out_no_session:
3577 	return status;
3578 }
3579 
3580 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3581 {
3582 	if (!cstate->session)
3583 		return false;
3584 	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3585 }
3586 
3587 __be32
3588 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3589 		union nfsd4_op_u *u)
3590 {
3591 	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3592 	struct nfsd4_session *ses;
3593 	__be32 status;
3594 	int ref_held_by_me = 0;
3595 	struct net *net = SVC_NET(r);
3596 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3597 
3598 	status = nfserr_not_only_op;
3599 	if (nfsd4_compound_in_session(cstate, sessionid)) {
3600 		if (!nfsd4_last_compound_op(r))
3601 			goto out;
3602 		ref_held_by_me++;
3603 	}
3604 	dump_sessionid(__func__, sessionid);
3605 	spin_lock(&nn->client_lock);
3606 	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3607 	if (!ses)
3608 		goto out_client_lock;
3609 	status = nfserr_wrong_cred;
3610 	if (!nfsd4_mach_creds_match(ses->se_client, r))
3611 		goto out_put_session;
3612 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3613 	if (status)
3614 		goto out_put_session;
3615 	unhash_session(ses);
3616 	spin_unlock(&nn->client_lock);
3617 
3618 	nfsd4_probe_callback_sync(ses->se_client);
3619 
3620 	spin_lock(&nn->client_lock);
3621 	status = nfs_ok;
3622 out_put_session:
3623 	nfsd4_put_session_locked(ses);
3624 out_client_lock:
3625 	spin_unlock(&nn->client_lock);
3626 out:
3627 	return status;
3628 }
3629 
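/*
 * Try to implicitly bind this connection to the session.  If the
 * transport is already bound, the new connection is freed; clients
 * that enforce machine credentials must use BIND_CONN_TO_SESSION
 * explicitly instead.
 */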
3630 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3631 {
3632 	struct nfs4_client *clp = ses->se_client;
3633 	struct nfsd4_conn *c;
3634 	__be32 status = nfs_ok;
3635 	int ret;
3636 
3637 	spin_lock(&clp->cl_lock);
3638 	c = __nfsd4_find_conn(new->cn_xprt, ses);
3639 	if (c)
3640 		goto out_free;
3641 	status = nfserr_conn_not_bound_to_session;
3642 	if (clp->cl_mach_cred)
3643 		goto out_free;
3644 	__nfsd4_hash_conn(new, ses);
3645 	spin_unlock(&clp->cl_lock);
3646 	ret = nfsd4_register_conn(new);
3647 	if (ret)
3648 		/* oops; xprt is already down: */
3649 		nfsd4_conn_lost(&new->cn_xpt_user);
3650 	return nfs_ok;
3651 out_free:
3652 	spin_unlock(&clp->cl_lock);
3653 	free_conn(new);
3654 	return status;
3655 }
3656 
3657 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3658 {
3659 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
3660 
3661 	return args->opcnt > session->se_fchannel.maxops;
3662 }
3663 
3664 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3665 				  struct nfsd4_session *session)
3666 {
3667 	struct xdr_buf *xb = &rqstp->rq_arg;
3668 
3669 	return xb->len > session->se_fchannel.maxreq_sz;
3670 }
3671 
3672 static bool replay_matches_cache(struct svc_rqst *rqstp,
3673 		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3674 {
3675 	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3676 
3677 	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3678 	    (bool)seq->cachethis)
3679 		return false;
3680 	/*
3681 	 * If there's an error then the reply can have fewer ops than
3682 	 * the call.
3683 	 */
3684 	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3685 		return false;
3686 	/*
3687 	 * But if we cached a reply with *more* ops than the call you're
3688 	 * sending us now, then this new call is clearly not really a
3689 	 * replay of the old one:
3690 	 */
3691 	if (slot->sl_opcnt > argp->opcnt)
3692 		return false;
3693 	/* This is the only check explicitly called for by the spec: */
3694 	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3695 		return false;
3696 	/*
3697 	 * There may be more comparisons we could actually do, but the
3698 	 * spec doesn't require us to catch every case where the calls
3699 	 * don't match (that would require caching the call as well as
3700 	 * the reply), so we don't bother.
3701 	 */
3702 	return true;
3703 }
3704 
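/*
 * Editorial note: SEQUENCE processing.  SEQUENCE must be the first op of the
 * compound.  Look up the session and slot, detect replays via the slot seqid
 * (either replaying the cached response or rejecting a false retry), bind the
 * connection to the session, and cap the reply buffer at what the channel
 * (and, if caching was requested, the reply cache) can hold.
 */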
3705 __be32
3706 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3707 		union nfsd4_op_u *u)
3708 {
3709 	struct nfsd4_sequence *seq = &u->sequence;
3710 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3711 	struct xdr_stream *xdr = &resp->xdr;
3712 	struct nfsd4_session *session;
3713 	struct nfs4_client *clp;
3714 	struct nfsd4_slot *slot;
3715 	struct nfsd4_conn *conn;
3716 	__be32 status;
3717 	int buflen;
3718 	struct net *net = SVC_NET(rqstp);
3719 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3720 
3721 	if (resp->opcnt != 1)
3722 		return nfserr_sequence_pos;
3723 
3724 	/*
3725 	 * Will be either used or freed by nfsd4_sequence_check_conn
3726 	 * below.
3727 	 */
3728 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3729 	if (!conn)
3730 		return nfserr_jukebox;
3731 
3732 	spin_lock(&nn->client_lock);
3733 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3734 	if (!session)
3735 		goto out_no_session;
3736 	clp = session->se_client;
3737 
3738 	status = nfserr_too_many_ops;
3739 	if (nfsd4_session_too_many_ops(rqstp, session))
3740 		goto out_put_session;
3741 
3742 	status = nfserr_req_too_big;
3743 	if (nfsd4_request_too_big(rqstp, session))
3744 		goto out_put_session;
3745 
3746 	status = nfserr_badslot;
3747 	if (seq->slotid >= session->se_fchannel.maxreqs)
3748 		goto out_put_session;
3749 
3750 	slot = session->se_slots[seq->slotid];
3751 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
3752 
3753 	/* We do not negotiate the number of slots yet, so set the
3754 	 * maxslots to the session maxreqs, which is used to encode
3755 	 * sr_highest_slotid and sr_target_slotid to maxslots */
3756 	seq->maxslots = session->se_fchannel.maxreqs;
3757 
3758 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3759 					slot->sl_flags & NFSD4_SLOT_INUSE);
3760 	if (status == nfserr_replay_cache) {
3761 		status = nfserr_seq_misordered;
3762 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3763 			goto out_put_session;
3764 		status = nfserr_seq_false_retry;
3765 		if (!replay_matches_cache(rqstp, seq, slot))
3766 			goto out_put_session;
3767 		cstate->slot = slot;
3768 		cstate->session = session;
3769 		cstate->clp = clp;
3770 		/* Return the cached reply status and set cstate->status
3771 		 * for nfsd4_proc_compound processing */
3772 		status = nfsd4_replay_cache_entry(resp, seq);
3773 		cstate->status = nfserr_replay_cache;
3774 		goto out;
3775 	}
3776 	if (status)
3777 		goto out_put_session;
3778 
3779 	status = nfsd4_sequence_check_conn(conn, session);
3780 	conn = NULL;
3781 	if (status)
3782 		goto out_put_session;
3783 
3784 	buflen = (seq->cachethis) ?
3785 			session->se_fchannel.maxresp_cached :
3786 			session->se_fchannel.maxresp_sz;
3787 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3788 				    nfserr_rep_too_big;
3789 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3790 		goto out_put_session;
3791 	svc_reserve(rqstp, buflen);
3792 
3793 	status = nfs_ok;
3794 	/* Success! bump slot seqid */
3795 	slot->sl_seqid = seq->seqid;
3796 	slot->sl_flags |= NFSD4_SLOT_INUSE;
3797 	if (seq->cachethis)
3798 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3799 	else
3800 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3801 
3802 	cstate->slot = slot;
3803 	cstate->session = session;
3804 	cstate->clp = clp;
3805 
3806 out:
3807 	switch (clp->cl_cb_state) {
3808 	case NFSD4_CB_DOWN:
3809 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3810 		break;
3811 	case NFSD4_CB_FAULT:
3812 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3813 		break;
3814 	default:
3815 		seq->status_flags = 0;
3816 	}
3817 	if (!list_empty(&clp->cl_revoked))
3818 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3819 out_no_session:
3820 	if (conn)
3821 		free_conn(conn);
3822 	spin_unlock(&nn->client_lock);
3823 	return status;
3824 out_put_session:
3825 	nfsd4_put_session_locked(session);
3826 	goto out_no_session;
3827 }
3828 
3829 void
3830 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3831 {
3832 	struct nfsd4_compound_state *cs = &resp->cstate;
3833 
3834 	if (nfsd4_has_session(cs)) {
3835 		if (cs->status != nfserr_replay_cache) {
3836 			nfsd4_store_cache_entry(resp);
3837 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3838 		}
3839 		/* Drop session reference that was taken in nfsd4_sequence() */
3840 		nfsd4_put_session(cs->session);
3841 	} else if (cs->clp)
3842 		put_client_renew(cs->clp);
3843 }
3844 
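/*
 * Editorial note: DESTROY_CLIENTID tears down a confirmed or unconfirmed
 * client record.  A confirmed client may only be destroyed once it holds no
 * state, and nfsd4_mach_creds_match() must accept the request's credentials.
 */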
3845 __be32
3846 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3847 		struct nfsd4_compound_state *cstate,
3848 		union nfsd4_op_u *u)
3849 {
3850 	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3851 	struct nfs4_client *conf, *unconf;
3852 	struct nfs4_client *clp = NULL;
3853 	__be32 status = 0;
3854 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3855 
3856 	spin_lock(&nn->client_lock);
3857 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3858 	conf = find_confirmed_client(&dc->clientid, true, nn);
3859 	WARN_ON_ONCE(conf && unconf);
3860 
3861 	if (conf) {
3862 		if (client_has_state(conf)) {
3863 			status = nfserr_clientid_busy;
3864 			goto out;
3865 		}
3866 		status = mark_client_expired_locked(conf);
3867 		if (status)
3868 			goto out;
3869 		clp = conf;
3870 	} else if (unconf)
3871 		clp = unconf;
3872 	else {
3873 		status = nfserr_stale_clientid;
3874 		goto out;
3875 	}
3876 	if (!nfsd4_mach_creds_match(clp, rqstp)) {
3877 		clp = NULL;
3878 		status = nfserr_wrong_cred;
3879 		goto out;
3880 	}
3881 	unhash_client_locked(clp);
3882 out:
3883 	spin_unlock(&nn->client_lock);
3884 	if (clp)
3885 		expire_client(clp);
3886 	return status;
3887 }
3888 
3889 __be32
3890 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3891 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3892 {
3893 	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3894 	__be32 status = 0;
3895 
3896 	if (rc->rca_one_fs) {
3897 		if (!cstate->current_fh.fh_dentry)
3898 			return nfserr_nofilehandle;
3899 		/*
3900 		 * We don't take advantage of the rca_one_fs case.
3901 		 * That's OK, it's optional, we can safely ignore it.
3902 		 */
3903 		return nfs_ok;
3904 	}
3905 
3906 	status = nfserr_complete_already;
3907 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3908 			     &cstate->session->se_client->cl_flags))
3909 		goto out;
3910 
3911 	status = nfserr_stale_clientid;
3912 	if (is_client_expired(cstate->session->se_client))
3913 		/*
3914 		 * The following error isn't really legal.
3915 		 * But we only get here if the client has just explicitly
3916 		 * destroyed itself.  Surely it no longer cares what
3917 		 * error it gets back on an operation for the dead
3918 		 * client.
3919 		 */
3920 		goto out;
3921 
3922 	status = nfs_ok;
3923 	nfsd4_client_record_create(cstate->session->se_client);
3924 	inc_reclaim_complete(cstate->session->se_client);
3925 out:
3926 	return status;
3927 }
3928 
3929 __be32
3930 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3931 		  union nfsd4_op_u *u)
3932 {
3933 	struct nfsd4_setclientid *setclid = &u->setclientid;
3934 	struct xdr_netobj 	clname = setclid->se_name;
3935 	nfs4_verifier		clverifier = setclid->se_verf;
3936 	struct nfs4_client	*conf, *new;
3937 	struct nfs4_client	*unconf = NULL;
3938 	__be32 			status;
3939 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3940 
3941 	new = create_client(clname, rqstp, &clverifier);
3942 	if (new == NULL)
3943 		return nfserr_jukebox;
3944 	/* Cases below refer to rfc 3530 section 14.2.33: */
3945 	spin_lock(&nn->client_lock);
3946 	conf = find_confirmed_client_by_name(&clname, nn);
3947 	if (conf && client_has_state(conf)) {
3948 		/* case 0: */
3949 		status = nfserr_clid_inuse;
3950 		if (clp_used_exchangeid(conf))
3951 			goto out;
3952 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3953 			trace_nfsd_clid_inuse_err(conf);
3954 			goto out;
3955 		}
3956 	}
3957 	unconf = find_unconfirmed_client_by_name(&clname, nn);
3958 	if (unconf)
3959 		unhash_client_locked(unconf);
3960 	/* We need to handle only case 1: probable callback update */
3961 	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3962 		copy_clid(new, conf);
3963 		gen_confirm(new, nn);
3964 	}
3965 	new->cl_minorversion = 0;
3966 	gen_callback(new, setclid, rqstp);
3967 	add_to_unconfirmed(new);
3968 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3969 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3970 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3971 	new = NULL;
3972 	status = nfs_ok;
3973 out:
3974 	spin_unlock(&nn->client_lock);
3975 	if (new)
3976 		free_client(new);
3977 	if (unconf)
3978 		expire_client(unconf);
3979 	return status;
3980 }
3981 
3982 
3983 __be32
3984 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3985 			struct nfsd4_compound_state *cstate,
3986 			union nfsd4_op_u *u)
3987 {
3988 	struct nfsd4_setclientid_confirm *setclientid_confirm =
3989 			&u->setclientid_confirm;
3990 	struct nfs4_client *conf, *unconf;
3991 	struct nfs4_client *old = NULL;
3992 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3993 	clientid_t * clid = &setclientid_confirm->sc_clientid;
3994 	__be32 status;
3995 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3996 
3997 	if (STALE_CLIENTID(clid, nn))
3998 		return nfserr_stale_clientid;
3999 
4000 	spin_lock(&nn->client_lock);
4001 	conf = find_confirmed_client(clid, false, nn);
4002 	unconf = find_unconfirmed_client(clid, false, nn);
4003 	/*
4004 	 * We try hard to give out unique clientid's, so if we get an
4005 	 * attempt to confirm the same clientid with a different cred,
4006 	 * the client may be buggy; this should never happen.
4007 	 *
4008 	 * Nevertheless, RFC 7530 recommends INUSE for this case:
4009 	 */
4010 	status = nfserr_clid_inuse;
4011 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
4012 		goto out;
4013 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
4014 		goto out;
4015 	/* cases below refer to rfc 3530 section 14.2.34: */
4016 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4017 		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4018 			/* case 2: probable retransmit */
4019 			status = nfs_ok;
4020 		} else /* case 4: client hasn't noticed we rebooted yet? */
4021 			status = nfserr_stale_clientid;
4022 		goto out;
4023 	}
4024 	status = nfs_ok;
4025 	if (conf) { /* case 1: callback update */
4026 		old = unconf;
4027 		unhash_client_locked(old);
4028 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4029 	} else { /* case 3: normal case; new or rebooted client */
4030 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4031 		if (old) {
4032 			status = nfserr_clid_inuse;
4033 			if (client_has_state(old)
4034 					&& !same_creds(&unconf->cl_cred,
4035 							&old->cl_cred))
4036 				goto out;
4037 			status = mark_client_expired_locked(old);
4038 			if (status) {
4039 				old = NULL;
4040 				goto out;
4041 			}
4042 		}
4043 		move_to_confirmed(unconf);
4044 		conf = unconf;
4045 	}
4046 	get_client_locked(conf);
4047 	spin_unlock(&nn->client_lock);
4048 	nfsd4_probe_callback(conf);
4049 	spin_lock(&nn->client_lock);
4050 	put_client_renew_locked(conf);
4051 out:
4052 	spin_unlock(&nn->client_lock);
4053 	if (old)
4054 		expire_client(old);
4055 	return status;
4056 }
4057 
4058 static struct nfs4_file *nfsd4_alloc_file(void)
4059 {
4060 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
4061 }
4062 
4063 /* OPEN Share state helper functions */
4064 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
4065 				struct nfs4_file *fp)
4066 {
4067 	lockdep_assert_held(&state_lock);
4068 
4069 	refcount_set(&fp->fi_ref, 1);
4070 	spin_lock_init(&fp->fi_lock);
4071 	INIT_LIST_HEAD(&fp->fi_stateids);
4072 	INIT_LIST_HEAD(&fp->fi_delegations);
4073 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4074 	fh_copy_shallow(&fp->fi_fhandle, fh);
4075 	fp->fi_deleg_file = NULL;
4076 	fp->fi_had_conflict = false;
4077 	fp->fi_share_deny = 0;
4078 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4079 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
4080 #ifdef CONFIG_NFSD_PNFS
4081 	INIT_LIST_HEAD(&fp->fi_lo_states);
4082 	atomic_set(&fp->fi_lo_recalls, 0);
4083 #endif
4084 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4085 }
4086 
4087 void
4088 nfsd4_free_slabs(void)
4089 {
4090 	kmem_cache_destroy(client_slab);
4091 	kmem_cache_destroy(openowner_slab);
4092 	kmem_cache_destroy(lockowner_slab);
4093 	kmem_cache_destroy(file_slab);
4094 	kmem_cache_destroy(stateid_slab);
4095 	kmem_cache_destroy(deleg_slab);
4096 	kmem_cache_destroy(odstate_slab);
4097 }
4098 
4099 int
4100 nfsd4_init_slabs(void)
4101 {
4102 	client_slab = kmem_cache_create("nfsd4_clients",
4103 			sizeof(struct nfs4_client), 0, 0, NULL);
4104 	if (client_slab == NULL)
4105 		goto out;
4106 	openowner_slab = kmem_cache_create("nfsd4_openowners",
4107 			sizeof(struct nfs4_openowner), 0, 0, NULL);
4108 	if (openowner_slab == NULL)
4109 		goto out_free_client_slab;
4110 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4111 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
4112 	if (lockowner_slab == NULL)
4113 		goto out_free_openowner_slab;
4114 	file_slab = kmem_cache_create("nfsd4_files",
4115 			sizeof(struct nfs4_file), 0, 0, NULL);
4116 	if (file_slab == NULL)
4117 		goto out_free_lockowner_slab;
4118 	stateid_slab = kmem_cache_create("nfsd4_stateids",
4119 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4120 	if (stateid_slab == NULL)
4121 		goto out_free_file_slab;
4122 	deleg_slab = kmem_cache_create("nfsd4_delegations",
4123 			sizeof(struct nfs4_delegation), 0, 0, NULL);
4124 	if (deleg_slab == NULL)
4125 		goto out_free_stateid_slab;
4126 	odstate_slab = kmem_cache_create("nfsd4_odstate",
4127 			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4128 	if (odstate_slab == NULL)
4129 		goto out_free_deleg_slab;
4130 	return 0;
4131 
4132 out_free_deleg_slab:
4133 	kmem_cache_destroy(deleg_slab);
4134 out_free_stateid_slab:
4135 	kmem_cache_destroy(stateid_slab);
4136 out_free_file_slab:
4137 	kmem_cache_destroy(file_slab);
4138 out_free_lockowner_slab:
4139 	kmem_cache_destroy(lockowner_slab);
4140 out_free_openowner_slab:
4141 	kmem_cache_destroy(openowner_slab);
4142 out_free_client_slab:
4143 	kmem_cache_destroy(client_slab);
4144 out:
4145 	return -ENOMEM;
4146 }
4147 
4148 static void init_nfs4_replay(struct nfs4_replay *rp)
4149 {
4150 	rp->rp_status = nfserr_serverfault;
4151 	rp->rp_buflen = 0;
4152 	rp->rp_buf = rp->rp_ibuf;
4153 	mutex_init(&rp->rp_mutex);
4154 }
4155 
4156 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4157 		struct nfs4_stateowner *so)
4158 {
4159 	if (!nfsd4_has_session(cstate)) {
4160 		mutex_lock(&so->so_replay.rp_mutex);
4161 		cstate->replay_owner = nfs4_get_stateowner(so);
4162 	}
4163 }
4164 
4165 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4166 {
4167 	struct nfs4_stateowner *so = cstate->replay_owner;
4168 
4169 	if (so != NULL) {
4170 		cstate->replay_owner = NULL;
4171 		mutex_unlock(&so->so_replay.rp_mutex);
4172 		nfs4_put_stateowner(so);
4173 	}
4174 }
4175 
4176 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4177 {
4178 	struct nfs4_stateowner *sop;
4179 
4180 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
4181 	if (!sop)
4182 		return NULL;
4183 
4184 	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4185 	if (!sop->so_owner.data) {
4186 		kmem_cache_free(slab, sop);
4187 		return NULL;
4188 	}
4189 
4190 	INIT_LIST_HEAD(&sop->so_stateids);
4191 	sop->so_client = clp;
4192 	init_nfs4_replay(&sop->so_replay);
4193 	atomic_set(&sop->so_count, 1);
4194 	return sop;
4195 }
4196 
4197 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4198 {
4199 	lockdep_assert_held(&clp->cl_lock);
4200 
4201 	list_add(&oo->oo_owner.so_strhash,
4202 		 &clp->cl_ownerstr_hashtbl[strhashval]);
4203 	list_add(&oo->oo_perclient, &clp->cl_openowners);
4204 }
4205 
4206 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4207 {
4208 	unhash_openowner_locked(openowner(so));
4209 }
4210 
4211 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4212 {
4213 	struct nfs4_openowner *oo = openowner(so);
4214 
4215 	kmem_cache_free(openowner_slab, oo);
4216 }
4217 
4218 static const struct nfs4_stateowner_operations openowner_ops = {
4219 	.so_unhash =	nfs4_unhash_openowner,
4220 	.so_free =	nfs4_free_openowner,
4221 };
4222 
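/*
 * Editorial note: search the file's stateid list for an OPEN stateid owned by
 * the given openowner.  The caller must hold fp->fi_lock; a reference is
 * taken on any stateid returned.
 */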
4223 static struct nfs4_ol_stateid *
4224 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4225 {
4226 	struct nfs4_ol_stateid *local, *ret = NULL;
4227 	struct nfs4_openowner *oo = open->op_openowner;
4228 
4229 	lockdep_assert_held(&fp->fi_lock);
4230 
4231 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4232 		/* ignore lock owners */
4233 		if (local->st_stateowner->so_is_open_owner == 0)
4234 			continue;
4235 		if (local->st_stateowner != &oo->oo_owner)
4236 			continue;
4237 		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4238 			ret = local;
4239 			refcount_inc(&ret->st_stid.sc_count);
4240 			break;
4241 		}
4242 	}
4243 	return ret;
4244 }
4245 
4246 static __be32
4247 nfsd4_verify_open_stid(struct nfs4_stid *s)
4248 {
4249 	__be32 ret = nfs_ok;
4250 
4251 	switch (s->sc_type) {
4252 	default:
4253 		break;
4254 	case 0:
4255 	case NFS4_CLOSED_STID:
4256 	case NFS4_CLOSED_DELEG_STID:
4257 		ret = nfserr_bad_stateid;
4258 		break;
4259 	case NFS4_REVOKED_DELEG_STID:
4260 		ret = nfserr_deleg_revoked;
4261 	}
4262 	return ret;
4263 }
4264 
4265 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4266 static __be32
4267 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4268 {
4269 	__be32 ret;
4270 
4271 	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4272 	ret = nfsd4_verify_open_stid(&stp->st_stid);
4273 	if (ret != nfs_ok)
4274 		mutex_unlock(&stp->st_mutex);
4275 	return ret;
4276 }
4277 
4278 static struct nfs4_ol_stateid *
4279 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4280 {
4281 	struct nfs4_ol_stateid *stp;
4282 	for (;;) {
4283 		spin_lock(&fp->fi_lock);
4284 		stp = nfsd4_find_existing_open(fp, open);
4285 		spin_unlock(&fp->fi_lock);
4286 		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4287 			break;
4288 		nfs4_put_stid(&stp->st_stid);
4289 	}
4290 	return stp;
4291 }
4292 
4293 static struct nfs4_openowner *
4294 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4295 			   struct nfsd4_compound_state *cstate)
4296 {
4297 	struct nfs4_client *clp = cstate->clp;
4298 	struct nfs4_openowner *oo, *ret;
4299 
4300 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4301 	if (!oo)
4302 		return NULL;
4303 	oo->oo_owner.so_ops = &openowner_ops;
4304 	oo->oo_owner.so_is_open_owner = 1;
4305 	oo->oo_owner.so_seqid = open->op_seqid;
4306 	oo->oo_flags = 0;
4307 	if (nfsd4_has_session(cstate))
4308 		oo->oo_flags |= NFS4_OO_CONFIRMED;
4309 	oo->oo_time = 0;
4310 	oo->oo_last_closed_stid = NULL;
4311 	INIT_LIST_HEAD(&oo->oo_close_lru);
4312 	spin_lock(&clp->cl_lock);
4313 	ret = find_openstateowner_str_locked(strhashval, open, clp);
4314 	if (ret == NULL) {
4315 		hash_openowner(oo, clp, strhashval);
4316 		ret = oo;
4317 	} else
4318 		nfs4_free_stateowner(&oo->oo_owner);
4319 
4320 	spin_unlock(&clp->cl_lock);
4321 	return ret;
4322 }
4323 
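/*
 * Editorial note: install the caller's preallocated open stateid
 * (open->op_stp) for this file and openowner, or return an existing one if
 * another thread got there first.  An existing stateid may race with a
 * concurrent CLOSE, in which case it is dropped and the lookup retried;
 * either way the stateid returned is locked.
 */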
4324 static struct nfs4_ol_stateid *
4325 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4326 {
4327 
4328 	struct nfs4_openowner *oo = open->op_openowner;
4329 	struct nfs4_ol_stateid *retstp = NULL;
4330 	struct nfs4_ol_stateid *stp;
4331 
4332 	stp = open->op_stp;
4333 	/* We are moving these outside of the spinlocks to avoid the warnings */
4334 	mutex_init(&stp->st_mutex);
4335 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4336 
4337 retry:
4338 	spin_lock(&oo->oo_owner.so_client->cl_lock);
4339 	spin_lock(&fp->fi_lock);
4340 
4341 	retstp = nfsd4_find_existing_open(fp, open);
4342 	if (retstp)
4343 		goto out_unlock;
4344 
4345 	open->op_stp = NULL;
4346 	refcount_inc(&stp->st_stid.sc_count);
4347 	stp->st_stid.sc_type = NFS4_OPEN_STID;
4348 	INIT_LIST_HEAD(&stp->st_locks);
4349 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4350 	get_nfs4_file(fp);
4351 	stp->st_stid.sc_file = fp;
4352 	stp->st_access_bmap = 0;
4353 	stp->st_deny_bmap = 0;
4354 	stp->st_openstp = NULL;
4355 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4356 	list_add(&stp->st_perfile, &fp->fi_stateids);
4357 
4358 out_unlock:
4359 	spin_unlock(&fp->fi_lock);
4360 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
4361 	if (retstp) {
4362 		/* Handle races with CLOSE */
4363 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4364 			nfs4_put_stid(&retstp->st_stid);
4365 			goto retry;
4366 		}
4367 		/* To keep mutex tracking happy */
4368 		mutex_unlock(&stp->st_mutex);
4369 		stp = retstp;
4370 	}
4371 	return stp;
4372 }
4373 
4374 /*
4375  * In the 4.0 case we need to keep the owners around a little while to handle
4376  * CLOSE replay. We still do need to release any file access that is held by
4377  * them before returning however.
4378  */
4379 static void
4380 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4381 {
4382 	struct nfs4_ol_stateid *last;
4383 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
4384 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4385 						nfsd_net_id);
4386 
4387 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4388 
4389 	/*
4390 	 * We know that we hold one reference via nfsd4_close, and another
4391 	 * "persistent" reference for the client. If the refcount is higher
4392 	 * than 2, then there are still calls in progress that are using this
4393 	 * stateid. We can't put the sc_file reference until they are finished.
4394 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
4395 	 * there should be no danger of the refcount going back up again at
4396 	 * this point.
4397 	 */
4398 	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4399 
4400 	release_all_access(s);
4401 	if (s->st_stid.sc_file) {
4402 		put_nfs4_file(s->st_stid.sc_file);
4403 		s->st_stid.sc_file = NULL;
4404 	}
4405 
4406 	spin_lock(&nn->client_lock);
4407 	last = oo->oo_last_closed_stid;
4408 	oo->oo_last_closed_stid = s;
4409 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4410 	oo->oo_time = ktime_get_boottime_seconds();
4411 	spin_unlock(&nn->client_lock);
4412 	if (last)
4413 		nfs4_put_stid(&last->st_stid);
4414 }
4415 
4416 /* search file_hashtbl[] for file */
4417 static struct nfs4_file *
4418 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
4419 {
4420 	struct nfs4_file *fp;
4421 
4422 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4423 				lockdep_is_held(&state_lock)) {
4424 		if (fh_match(&fp->fi_fhandle, fh)) {
4425 			if (refcount_inc_not_zero(&fp->fi_ref))
4426 				return fp;
4427 		}
4428 	}
4429 	return NULL;
4430 }
4431 
4432 struct nfs4_file *
4433 find_file(struct knfsd_fh *fh)
4434 {
4435 	struct nfs4_file *fp;
4436 	unsigned int hashval = file_hashval(fh);
4437 
4438 	rcu_read_lock();
4439 	fp = find_file_locked(fh, hashval);
4440 	rcu_read_unlock();
4441 	return fp;
4442 }
4443 
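/*
 * Editorial note: look up the nfs4_file for this filehandle, first locklessly
 * under RCU and then, if not found, again under state_lock so the caller's
 * preallocated 'new' entry can be inserted without racing another insertion.
 * The caller can tell whether 'new' was consumed by comparing it with the
 * return value.
 */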
4444 static struct nfs4_file *
4445 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
4446 {
4447 	struct nfs4_file *fp;
4448 	unsigned int hashval = file_hashval(fh);
4449 
4450 	rcu_read_lock();
4451 	fp = find_file_locked(fh, hashval);
4452 	rcu_read_unlock();
4453 	if (fp)
4454 		return fp;
4455 
4456 	spin_lock(&state_lock);
4457 	fp = find_file_locked(fh, hashval);
4458 	if (likely(fp == NULL)) {
4459 		nfsd4_init_file(fh, hashval, new);
4460 		fp = new;
4461 	}
4462 	spin_unlock(&state_lock);
4463 
4464 	return fp;
4465 }
4466 
4467 /*
4468  * Called to check deny when READ with all zero stateid or
4469  * WRITE with all zero or all one stateid
4470  */
4471 static __be32
4472 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4473 {
4474 	struct nfs4_file *fp;
4475 	__be32 ret = nfs_ok;
4476 
4477 	fp = find_file(&current_fh->fh_handle);
4478 	if (!fp)
4479 		return ret;
4480 	/* Check for conflicting share reservations */
4481 	spin_lock(&fp->fi_lock);
4482 	if (fp->fi_share_deny & deny_type)
4483 		ret = nfserr_locked;
4484 	spin_unlock(&fp->fi_lock);
4485 	put_nfs4_file(fp);
4486 	return ret;
4487 }
4488 
4489 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4490 {
4491 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4492 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4493 					  nfsd_net_id);
4494 
4495 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4496 
4497 	/*
4498 	 * We can't do this in nfsd_break_deleg_cb because it is
4499 	 * already holding inode->i_lock.
4500 	 *
4501 	 * If the dl_time != 0, then we know that it has already been
4502 	 * queued for a lease break. Don't queue it again.
4503 	 */
4504 	spin_lock(&state_lock);
4505 	if (dp->dl_time == 0) {
4506 		dp->dl_time = ktime_get_boottime_seconds();
4507 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4508 	}
4509 	spin_unlock(&state_lock);
4510 }
4511 
4512 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4513 		struct rpc_task *task)
4514 {
4515 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4516 
4517 	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4518 	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4519 	        return 1;
4520 
4521 	switch (task->tk_status) {
4522 	case 0:
4523 		return 1;
4524 	case -NFS4ERR_DELAY:
4525 		rpc_delay(task, 2 * HZ);
4526 		return 0;
4527 	case -EBADHANDLE:
4528 	case -NFS4ERR_BAD_STATEID:
4529 		/*
4530 		 * Race: client probably got cb_recall before open reply
4531 		 * granting delegation.
4532 		 */
4533 		if (dp->dl_retries--) {
4534 			rpc_delay(task, 2 * HZ);
4535 			return 0;
4536 		}
4537 		fallthrough;
4538 	default:
4539 		return 1;
4540 	}
4541 }
4542 
4543 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4544 {
4545 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4546 
4547 	nfs4_put_stid(&dp->dl_stid);
4548 }
4549 
4550 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4551 	.prepare	= nfsd4_cb_recall_prepare,
4552 	.done		= nfsd4_cb_recall_done,
4553 	.release	= nfsd4_cb_recall_release,
4554 };
4555 
4556 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4557 {
4558 	/*
4559 	 * We're assuming the state code never drops its reference
4560 	 * without first removing the lease.  Since we're in this lease
4561 	 * callback (and since the lease code is serialized by the
4562 	 * i_lock) we know the server hasn't removed the lease yet, and
4563 	 * we know it's safe to take a reference.
4564 	 */
4565 	refcount_inc(&dp->dl_stid.sc_count);
4566 	nfsd4_run_cb(&dp->dl_recall);
4567 }
4568 
4569 /* Called from break_lease() with i_lock held. */
4570 static bool
4571 nfsd_break_deleg_cb(struct file_lock *fl)
4572 {
4573 	bool ret = false;
4574 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4575 	struct nfs4_file *fp = dp->dl_stid.sc_file;
4576 
4577 	trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
4578 
4579 	/*
4580 	 * We don't want the locks code to time out the lease for us;
4581 	 * we'll remove it ourselves if a delegation isn't returned
4582 	 * in time:
4583 	 */
4584 	fl->fl_break_time = 0;
4585 
4586 	spin_lock(&fp->fi_lock);
4587 	fp->fi_had_conflict = true;
4588 	nfsd_break_one_deleg(dp);
4589 	spin_unlock(&fp->fi_lock);
4590 	return ret;
4591 }
4592 
4593 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4594 {
4595 	struct nfs4_delegation *dl = fl->fl_owner;
4596 	struct svc_rqst *rqst;
4597 	struct nfs4_client *clp;
4598 
4599 	if (!i_am_nfsd())
4600 		return false;
4601 	rqst = kthread_data(current);
4602 	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4603 	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4604 		return false;
4605 	clp = *(rqst->rq_lease_breaker);
4606 	return dl->dl_stid.sc_client == clp;
4607 }
4608 
4609 static int
4610 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4611 		     struct list_head *dispose)
4612 {
4613 	if (arg & F_UNLCK)
4614 		return lease_modify(onlist, arg, dispose);
4615 	else
4616 		return -EAGAIN;
4617 }
4618 
4619 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4620 	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4621 	.lm_break = nfsd_break_deleg_cb,
4622 	.lm_change = nfsd_change_deleg_cb,
4623 };
4624 
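/*
 * Editorial note: NFSv4.0 open/lock owner seqid check.  The expected value
 * means a new request, one less than expected means a retransmission to be
 * answered from the replay cache, and anything else is a bad seqid.  Sessions
 * (v4.1+) make owner seqids meaningless, so they always pass.
 */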
4625 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4626 {
4627 	if (nfsd4_has_session(cstate))
4628 		return nfs_ok;
4629 	if (seqid == so->so_seqid - 1)
4630 		return nfserr_replay_me;
4631 	if (seqid == so->so_seqid)
4632 		return nfs_ok;
4633 	return nfserr_bad_seqid;
4634 }
4635 
4636 static __be32 lookup_clientid(clientid_t *clid,
4637 		struct nfsd4_compound_state *cstate,
4638 		struct nfsd_net *nn,
4639 		bool sessions)
4640 {
4641 	struct nfs4_client *found;
4642 
4643 	if (cstate->clp) {
4644 		found = cstate->clp;
4645 		if (!same_clid(&found->cl_clientid, clid))
4646 			return nfserr_stale_clientid;
4647 		return nfs_ok;
4648 	}
4649 
4650 	if (STALE_CLIENTID(clid, nn))
4651 		return nfserr_stale_clientid;
4652 
4653 	/*
4654 	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4655 	 * cached already then we know this is for v4.0 and "sessions"
4656 	 * will be false.
4657 	 */
4658 	WARN_ON_ONCE(cstate->session);
4659 	spin_lock(&nn->client_lock);
4660 	found = find_confirmed_client(clid, sessions, nn);
4661 	if (!found) {
4662 		spin_unlock(&nn->client_lock);
4663 		return nfserr_expired;
4664 	}
4665 	atomic_inc(&found->cl_rpc_users);
4666 	spin_unlock(&nn->client_lock);
4667 
4668 	/* Cache the nfs4_client in cstate! */
4669 	cstate->clp = found;
4670 	return nfs_ok;
4671 }
4672 
4673 __be32
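/*
 * Editorial note: first phase of OPEN processing.  Validate the clientid,
 * find or create the openowner (checking the seqid for v4.0 replays), and
 * preallocate the stateid (and, for pNFS exports, the per-client open state)
 * that nfsd4_process_open2() may need, so nothing fails on allocation after
 * the file has been created.
 */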
4674 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4675 		    struct nfsd4_open *open, struct nfsd_net *nn)
4676 {
4677 	clientid_t *clientid = &open->op_clientid;
4678 	struct nfs4_client *clp = NULL;
4679 	unsigned int strhashval;
4680 	struct nfs4_openowner *oo = NULL;
4681 	__be32 status;
4682 
4683 	if (STALE_CLIENTID(&open->op_clientid, nn))
4684 		return nfserr_stale_clientid;
4685 	/*
4686 	 * In case we need it later, after we've already created the
4687 	 * file and don't want to risk a further failure:
4688 	 */
4689 	open->op_file = nfsd4_alloc_file();
4690 	if (open->op_file == NULL)
4691 		return nfserr_jukebox;
4692 
4693 	status = lookup_clientid(clientid, cstate, nn, false);
4694 	if (status)
4695 		return status;
4696 	clp = cstate->clp;
4697 
4698 	strhashval = ownerstr_hashval(&open->op_owner);
4699 	oo = find_openstateowner_str(strhashval, open, clp);
4700 	open->op_openowner = oo;
4701 	if (!oo) {
4702 		goto new_owner;
4703 	}
4704 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4705 		/* Replace unconfirmed owners without checking for replay. */
4706 		release_openowner(oo);
4707 		open->op_openowner = NULL;
4708 		goto new_owner;
4709 	}
4710 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4711 	if (status)
4712 		return status;
4713 	goto alloc_stateid;
4714 new_owner:
4715 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
4716 	if (oo == NULL)
4717 		return nfserr_jukebox;
4718 	open->op_openowner = oo;
4719 alloc_stateid:
4720 	open->op_stp = nfs4_alloc_open_stateid(clp);
4721 	if (!open->op_stp)
4722 		return nfserr_jukebox;
4723 
4724 	if (nfsd4_has_session(cstate) &&
4725 	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4726 		open->op_odstate = alloc_clnt_odstate(clp);
4727 		if (!open->op_odstate)
4728 			return nfserr_jukebox;
4729 	}
4730 
4731 	return nfs_ok;
4732 }
4733 
4734 static inline __be32
4735 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4736 {
4737 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4738 		return nfserr_openmode;
4739 	else
4740 		return nfs_ok;
4741 }
4742 
4743 static int share_access_to_flags(u32 share_access)
4744 {
4745 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4746 }
4747 
4748 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4749 {
4750 	struct nfs4_stid *ret;
4751 
4752 	ret = find_stateid_by_type(cl, s,
4753 				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4754 	if (!ret)
4755 		return NULL;
4756 	return delegstateid(ret);
4757 }
4758 
4759 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4760 {
4761 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4762 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4763 }
4764 
4765 static __be32
4766 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4767 		struct nfs4_delegation **dp)
4768 {
4769 	int flags;
4770 	__be32 status = nfserr_bad_stateid;
4771 	struct nfs4_delegation *deleg;
4772 
4773 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4774 	if (deleg == NULL)
4775 		goto out;
4776 	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4777 		nfs4_put_stid(&deleg->dl_stid);
4778 		if (cl->cl_minorversion)
4779 			status = nfserr_deleg_revoked;
4780 		goto out;
4781 	}
4782 	flags = share_access_to_flags(open->op_share_access);
4783 	status = nfs4_check_delegmode(deleg, flags);
4784 	if (status) {
4785 		nfs4_put_stid(&deleg->dl_stid);
4786 		goto out;
4787 	}
4788 	*dp = deleg;
4789 out:
4790 	if (!nfsd4_is_deleg_cur(open))
4791 		return nfs_ok;
4792 	if (status)
4793 		return status;
4794 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4795 	return nfs_ok;
4796 }
4797 
4798 static inline int nfs4_access_to_access(u32 nfs4_access)
4799 {
4800 	int flags = 0;
4801 
4802 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4803 		flags |= NFSD_MAY_READ;
4804 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4805 		flags |= NFSD_MAY_WRITE;
4806 	return flags;
4807 }
4808 
4809 static inline __be32
4810 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4811 		struct nfsd4_open *open)
4812 {
4813 	struct iattr iattr = {
4814 		.ia_valid = ATTR_SIZE,
4815 		.ia_size = 0,
4816 	};
4817 	if (!open->op_truncate)
4818 		return 0;
4819 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4820 		return nfserr_inval;
4821 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
4822 }
4823 
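/*
 * Editorial note: grant the access and deny modes requested by this OPEN on
 * the nfs4_file, updating the stateid's share bitmaps, and make sure an
 * underlying struct nfsd_file is open for the requested mode (acquiring one
 * outside fi_lock if necessary).  Any requested truncation is applied last,
 * and the access grant is rolled back if it fails.
 */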
4824 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4825 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4826 		struct nfsd4_open *open)
4827 {
4828 	struct nfsd_file *nf = NULL;
4829 	__be32 status;
4830 	int oflag = nfs4_access_to_omode(open->op_share_access);
4831 	int access = nfs4_access_to_access(open->op_share_access);
4832 	unsigned char old_access_bmap, old_deny_bmap;
4833 
4834 	spin_lock(&fp->fi_lock);
4835 
4836 	/*
4837 	 * Are we trying to set a deny mode that would conflict with
4838 	 * current access?
4839 	 */
4840 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4841 	if (status != nfs_ok) {
4842 		spin_unlock(&fp->fi_lock);
4843 		goto out;
4844 	}
4845 
4846 	/* set access to the file */
4847 	status = nfs4_file_get_access(fp, open->op_share_access);
4848 	if (status != nfs_ok) {
4849 		spin_unlock(&fp->fi_lock);
4850 		goto out;
4851 	}
4852 
4853 	/* Set access bits in stateid */
4854 	old_access_bmap = stp->st_access_bmap;
4855 	set_access(open->op_share_access, stp);
4856 
4857 	/* Set new deny mask */
4858 	old_deny_bmap = stp->st_deny_bmap;
4859 	set_deny(open->op_share_deny, stp);
4860 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4861 
4862 	if (!fp->fi_fds[oflag]) {
4863 		spin_unlock(&fp->fi_lock);
4864 		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4865 		if (status)
4866 			goto out_put_access;
4867 		spin_lock(&fp->fi_lock);
4868 		if (!fp->fi_fds[oflag]) {
4869 			fp->fi_fds[oflag] = nf;
4870 			nf = NULL;
4871 		}
4872 	}
4873 	spin_unlock(&fp->fi_lock);
4874 	if (nf)
4875 		nfsd_file_put(nf);
4876 
4877 	status = nfsd4_truncate(rqstp, cur_fh, open);
4878 	if (status)
4879 		goto out_put_access;
4880 out:
4881 	return status;
4882 out_put_access:
4883 	stp->st_access_bmap = old_access_bmap;
4884 	nfs4_file_put_access(fp, open->op_share_access);
4885 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4886 	goto out;
4887 }
4888 
4889 static __be32
4890 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4891 {
4892 	__be32 status;
4893 	unsigned char old_deny_bmap = stp->st_deny_bmap;
4894 
4895 	if (!test_access(open->op_share_access, stp))
4896 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4897 
4898 	/* test and set deny mode */
4899 	spin_lock(&fp->fi_lock);
4900 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4901 	if (status == nfs_ok) {
4902 		set_deny(open->op_share_deny, stp);
4903 		fp->fi_share_deny |=
4904 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4905 	}
4906 	spin_unlock(&fp->fi_lock);
4907 
4908 	if (status != nfs_ok)
4909 		return status;
4910 
4911 	status = nfsd4_truncate(rqstp, cur_fh, open);
4912 	if (status != nfs_ok)
4913 		reset_union_bmap_deny(old_deny_bmap, stp);
4914 	return status;
4915 }
4916 
4917 /* Should we give out recallable state?: */
4918 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4919 {
4920 	if (clp->cl_cb_state == NFSD4_CB_UP)
4921 		return true;
4922 	/*
4923 	 * In the sessions case, since we don't have to establish a
4924 	 * separate connection for callbacks, we assume it's OK
4925 	 * until we hear otherwise:
4926 	 */
4927 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4928 }
4929 
4930 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4931 						int flag)
4932 {
4933 	struct file_lock *fl;
4934 
4935 	fl = locks_alloc_lock();
4936 	if (!fl)
4937 		return NULL;
4938 	fl->fl_lmops = &nfsd_lease_mng_ops;
4939 	fl->fl_flags = FL_DELEG;
4940 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4941 	fl->fl_end = OFFSET_MAX;
4942 	fl->fl_owner = (fl_owner_t)dp;
4943 	fl->fl_pid = current->tgid;
4944 	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
4945 	return fl;
4946 }
4947 
4948 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
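/*
 * Editorial note: refuse to hand out a read delegation if anyone else could
 * write to the file: compare the inode's i_writecount against the write opens
 * nfsd itself holds for this nfs4_file, and walk the file's clnt_odstate list
 * to see whether another client has open state on it.
 */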
4949 						struct nfs4_file *fp)
4950 {
4951 	struct nfs4_clnt_odstate *co;
4952 	struct file *f = fp->fi_deleg_file->nf_file;
4953 	struct inode *ino = locks_inode(f);
4954 	int writes = atomic_read(&ino->i_writecount);
4955 
4956 	if (fp->fi_fds[O_WRONLY])
4957 		writes--;
4958 	if (fp->fi_fds[O_RDWR])
4959 		writes--;
4960 	if (writes > 0)
4961 		return -EAGAIN;
4962 	spin_lock(&fp->fi_lock);
4963 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
4964 		if (co->co_client != clp) {
4965 			spin_unlock(&fp->fi_lock);
4966 			return -EAGAIN;
4967 		}
4968 	}
4969 	spin_unlock(&fp->fi_lock);
4970 	return 0;
4971 }
4972 
4973 static struct nfs4_delegation *
4974 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4975 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4976 {
4977 	int status = 0;
4978 	struct nfs4_delegation *dp;
4979 	struct nfsd_file *nf;
4980 	struct file_lock *fl;
4981 
4982 	/*
4983 	 * The fi_had_conflict and nfs_get_existing_delegation checks
4984 	 * here are just optimizations; we'll need to recheck them at
4985 	 * the end:
4986 	 */
4987 	if (fp->fi_had_conflict)
4988 		return ERR_PTR(-EAGAIN);
4989 
4990 	nf = find_readable_file(fp);
4991 	if (!nf) {
4992 		/*
4993 		 * We probably could attempt another open and get a read
4994 		 * delegation, but for now, don't bother until the
4995 		 * client actually sends us one.
4996 		 */
4997 		return ERR_PTR(-EAGAIN);
4998 	}
4999 	spin_lock(&state_lock);
5000 	spin_lock(&fp->fi_lock);
5001 	if (nfs4_delegation_exists(clp, fp))
5002 		status = -EAGAIN;
5003 	else if (!fp->fi_deleg_file) {
5004 		fp->fi_deleg_file = nf;
5005 		/* increment early to prevent fi_deleg_file from being
5006 		 * cleared */
5007 		fp->fi_delegees = 1;
5008 		nf = NULL;
5009 	} else
5010 		fp->fi_delegees++;
5011 	spin_unlock(&fp->fi_lock);
5012 	spin_unlock(&state_lock);
5013 	if (nf)
5014 		nfsd_file_put(nf);
5015 	if (status)
5016 		return ERR_PTR(status);
5017 
5018 	status = -ENOMEM;
5019 	dp = alloc_init_deleg(clp, fp, fh, odstate);
5020 	if (!dp)
5021 		goto out_delegees;
5022 
5023 	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5024 	if (!fl)
5025 		goto out_clnt_odstate;
5026 
5027 	status = nfsd4_check_conflicting_opens(clp, fp);
5028 	if (status) {
5029 		locks_free_lock(fl);
5030 		goto out_clnt_odstate;
5031 	}
5032 	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5033 	if (fl)
5034 		locks_free_lock(fl);
5035 	if (status)
5036 		goto out_clnt_odstate;
5037 	status = nfsd4_check_conflicting_opens(clp, fp);
5038 	if (status)
5039 		goto out_clnt_odstate;
5040 
5041 	spin_lock(&state_lock);
5042 	spin_lock(&fp->fi_lock);
5043 	if (fp->fi_had_conflict)
5044 		status = -EAGAIN;
5045 	else
5046 		status = hash_delegation_locked(dp, fp);
5047 	spin_unlock(&fp->fi_lock);
5048 	spin_unlock(&state_lock);
5049 
5050 	if (status)
5051 		goto out_unlock;
5052 
5053 	return dp;
5054 out_unlock:
5055 	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5056 out_clnt_odstate:
5057 	put_clnt_odstate(dp->dl_clnt_odstate);
5058 	nfs4_put_stid(&dp->dl_stid);
5059 out_delegees:
5060 	put_deleg_file(fp);
5061 	return ERR_PTR(status);
5062 }
5063 
5064 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5065 {
5066 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5067 	if (status == -EAGAIN)
5068 		open->op_why_no_deleg = WND4_CONTENTION;
5069 	else {
5070 		open->op_why_no_deleg = WND4_RESOURCE;
5071 		switch (open->op_deleg_want) {
5072 		case NFS4_SHARE_WANT_READ_DELEG:
5073 		case NFS4_SHARE_WANT_WRITE_DELEG:
5074 		case NFS4_SHARE_WANT_ANY_DELEG:
5075 			break;
5076 		case NFS4_SHARE_WANT_CANCEL:
5077 			open->op_why_no_deleg = WND4_CANCELLED;
5078 			break;
5079 		case NFS4_SHARE_WANT_NO_DELEG:
5080 			WARN_ON_ONCE(1);
5081 		}
5082 	}
5083 }
5084 
5085 /*
5086  * Attempt to hand out a delegation.
5087  *
5088  * Note we don't support write delegations, and won't until the vfs has
5089  * proper support for them.
5090  */
5091 static void
5092 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
5093 			struct nfs4_ol_stateid *stp)
5094 {
5095 	struct nfs4_delegation *dp;
5096 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5097 	struct nfs4_client *clp = stp->st_stid.sc_client;
5098 	int cb_up;
5099 	int status = 0;
5100 
5101 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5102 	open->op_recall = 0;
5103 	switch (open->op_claim_type) {
5104 		case NFS4_OPEN_CLAIM_PREVIOUS:
5105 			if (!cb_up)
5106 				open->op_recall = 1;
5107 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5108 				goto out_no_deleg;
5109 			break;
5110 		case NFS4_OPEN_CLAIM_NULL:
5111 		case NFS4_OPEN_CLAIM_FH:
5112 			/*
5113 			 * Let's not give out any delegations till everyone's
5114 			 * had the chance to reclaim theirs, *and* until
5115 			 * NLM locks have all been reclaimed:
5116 			 */
5117 			if (locks_in_grace(clp->net))
5118 				goto out_no_deleg;
5119 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5120 				goto out_no_deleg;
5121 			break;
5122 		default:
5123 			goto out_no_deleg;
5124 	}
5125 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5126 	if (IS_ERR(dp))
5127 		goto out_no_deleg;
5128 
5129 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5130 
5131 	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5132 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5133 	nfs4_put_stid(&dp->dl_stid);
5134 	return;
5135 out_no_deleg:
5136 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5137 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5138 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5139 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5140 		open->op_recall = 1;
5141 	}
5142 
5143 	/* 4.1 client asking for a delegation? */
5144 	if (open->op_deleg_want)
5145 		nfsd4_open_deleg_none_ext(open, status);
5146 	return;
5147 }
5148 
5149 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5150 					struct nfs4_delegation *dp)
5151 {
5152 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5153 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5154 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5155 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5156 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5157 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5158 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5159 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5160 	}
5161 	/* Otherwise the client must be confused wanting a delegation
5162 	 * it already has, therefore we don't return
5163 	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5164 	 */
5165 }
5166 
5167 __be32
5168 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5169 {
5170 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
5171 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5172 	struct nfs4_file *fp = NULL;
5173 	struct nfs4_ol_stateid *stp = NULL;
5174 	struct nfs4_delegation *dp = NULL;
5175 	__be32 status;
5176 	bool new_stp = false;
5177 
5178 	/*
5179 	 * Lookup file; if found, lookup stateid and check open request,
5180 	 * and check for delegations in the process of being recalled.
5181 	 * If not found, create the nfs4_file struct
5182 	 */
5183 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
5184 	if (fp != open->op_file) {
5185 		status = nfs4_check_deleg(cl, open, &dp);
5186 		if (status)
5187 			goto out;
5188 		stp = nfsd4_find_and_lock_existing_open(fp, open);
5189 	} else {
5190 		open->op_file = NULL;
5191 		status = nfserr_bad_stateid;
5192 		if (nfsd4_is_deleg_cur(open))
5193 			goto out;
5194 	}
5195 
5196 	if (!stp) {
5197 		stp = init_open_stateid(fp, open);
5198 		if (!open->op_stp)
5199 			new_stp = true;
5200 	}
5201 
5202 	/*
5203 	 * OPEN the file, or upgrade an existing OPEN.
5204 	 * If truncate fails, the OPEN fails.
5205 	 *
5206 	 * stp is already locked.
5207 	 */
5208 	if (!new_stp) {
5209 		/* Stateid was found, this is an OPEN upgrade */
5210 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5211 		if (status) {
5212 			mutex_unlock(&stp->st_mutex);
5213 			goto out;
5214 		}
5215 	} else {
5216 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5217 		if (status) {
5218 			stp->st_stid.sc_type = NFS4_CLOSED_STID;
5219 			release_open_stateid(stp);
5220 			mutex_unlock(&stp->st_mutex);
5221 			goto out;
5222 		}
5223 
5224 		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5225 							open->op_odstate);
5226 		if (stp->st_clnt_odstate == open->op_odstate)
5227 			open->op_odstate = NULL;
5228 	}
5229 
5230 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5231 	mutex_unlock(&stp->st_mutex);
5232 
5233 	if (nfsd4_has_session(&resp->cstate)) {
5234 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5235 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5236 			open->op_why_no_deleg = WND4_NOT_WANTED;
5237 			goto nodeleg;
5238 		}
5239 	}
5240 
5241 	/*
5242 	 * Attempt to hand out a delegation. No error return, because the
5243 	 * OPEN succeeds even if we fail.
5244 	 */
5245 	nfs4_open_delegation(current_fh, open, stp);
5246 nodeleg:
5247 	status = nfs_ok;
5248 	trace_nfsd_open(&stp->st_stid.sc_stateid);
5249 out:
5250 	/* 4.1 client trying to upgrade/downgrade delegation? */
5251 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5252 	    open->op_deleg_want)
5253 		nfsd4_deleg_xgrade_none_ext(open, dp);
5254 
5255 	if (fp)
5256 		put_nfs4_file(fp);
5257 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5258 		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5259 	/*
5260 	 * To finish the open response, we just need to set the rflags.
5261 	 */
5262 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5263 	if (nfsd4_has_session(&resp->cstate))
5264 		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5265 	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5266 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5267 
5268 	if (dp)
5269 		nfs4_put_stid(&dp->dl_stid);
5270 	if (stp)
5271 		nfs4_put_stid(&stp->st_stid);
5272 
5273 	return status;
5274 }
5275 
5276 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5277 			      struct nfsd4_open *open)
5278 {
5279 	if (open->op_openowner) {
5280 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5281 
5282 		nfsd4_cstate_assign_replay(cstate, so);
5283 		nfs4_put_stateowner(so);
5284 	}
5285 	if (open->op_file)
5286 		kmem_cache_free(file_slab, open->op_file);
5287 	if (open->op_stp)
5288 		nfs4_put_stid(&open->op_stp->st_stid);
5289 	if (open->op_odstate)
5290 		kmem_cache_free(odstate_slab, open->op_odstate);
5291 }
5292 
5293 __be32
5294 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5295 	    union nfsd4_op_u *u)
5296 {
5297 	clientid_t *clid = &u->renew;
5298 	struct nfs4_client *clp;
5299 	__be32 status;
5300 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5301 
5302 	trace_nfsd_clid_renew(clid);
5303 	status = lookup_clientid(clid, cstate, nn, false);
5304 	if (status)
5305 		goto out;
5306 	clp = cstate->clp;
5307 	status = nfserr_cb_path_down;
5308 	if (!list_empty(&clp->cl_delegations)
5309 			&& clp->cl_cb_state != NFSD4_CB_UP)
5310 		goto out;
5311 	status = nfs_ok;
5312 out:
5313 	return status;
5314 }
5315 
5316 void
5317 nfsd4_end_grace(struct nfsd_net *nn)
5318 {
5319 	/* do nothing if grace period already ended */
5320 	if (nn->grace_ended)
5321 		return;
5322 
5323 	trace_nfsd_grace_complete(nn);
5324 	nn->grace_ended = true;
5325 	/*
5326 	 * If the server goes down again right now, an NFSv4
5327 	 * client will still be allowed to reclaim after it comes back up,
5328 	 * even if it hasn't yet had a chance to reclaim state this time.
5329 	 *
5330 	 */
5331 	nfsd4_record_grace_done(nn);
5332 	/*
5333 	 * At this point, NFSv4 clients can still reclaim.  But if the
5334 	 * server crashes, any that have not yet reclaimed will be out
5335 	 * of luck on the next boot.
5336 	 *
5337 	 * (NFSv4.1+ clients are considered to have reclaimed once they
5338 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
5339 	 * have reclaimed after their first OPEN.)
5340 	 */
5341 	locks_end_grace(&nn->nfsd4_manager);
5342 	/*
5343 	 * At this point, and once lockd and/or any other containers
5344 	 * exit their grace period, further reclaims will fail and
5345 	 * regular locking can resume.
5346 	 */
5347 }
5348 
5349 /*
5350  * If we've waited a lease period but there are still clients trying to
5351  * reclaim, wait a little longer to give them a chance to finish.
5352  */
5353 static bool clients_still_reclaiming(struct nfsd_net *nn)
5354 {
5355 	time64_t double_grace_period_end = nn->boot_time +
5356 					   2 * nn->nfsd4_lease;
5357 
5358 	if (nn->track_reclaim_completes &&
5359 			atomic_read(&nn->nr_reclaim_complete) ==
5360 			nn->reclaim_str_hashtbl_size)
5361 		return false;
5362 	if (!nn->somebody_reclaimed)
5363 		return false;
5364 	nn->somebody_reclaimed = false;
5365 	/*
5366 	 * If we've given them *two* lease times to reclaim, and they're
5367 	 * still not done, give up:
5368 	 */
5369 	if (ktime_get_boottime_seconds() > double_grace_period_end)
5370 		return false;
5371 	return true;
5372 }
5373 
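/*
 * Editorial note: the laundromat ends the grace period once clients have
 * finished reclaiming, frees expired copy-notify stateids, expires idle
 * clients, revokes delegations whose recalls have gone unanswered for a lease
 * period, releases openowners parked on the close LRU, and times out
 * blocked-lock notifications.  It returns the number of seconds until it
 * should run again.
 */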
5374 static time64_t
5375 nfs4_laundromat(struct nfsd_net *nn)
5376 {
5377 	struct nfs4_client *clp;
5378 	struct nfs4_openowner *oo;
5379 	struct nfs4_delegation *dp;
5380 	struct nfs4_ol_stateid *stp;
5381 	struct nfsd4_blocked_lock *nbl;
5382 	struct list_head *pos, *next, reaplist;
5383 	time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
5384 	time64_t t, new_timeo = nn->nfsd4_lease;
5385 	struct nfs4_cpntf_state *cps;
5386 	copy_stateid_t *cps_t;
5387 	int i;
5388 
5389 	if (clients_still_reclaiming(nn)) {
5390 		new_timeo = 0;
5391 		goto out;
5392 	}
5393 	nfsd4_end_grace(nn);
5394 	INIT_LIST_HEAD(&reaplist);
5395 
5396 	spin_lock(&nn->s2s_cp_lock);
5397 	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5398 		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5399 		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5400 				cps->cpntf_time > cutoff)
5401 			_free_cpntf_state_locked(nn, cps);
5402 	}
5403 	spin_unlock(&nn->s2s_cp_lock);
5404 
5405 	spin_lock(&nn->client_lock);
5406 	list_for_each_safe(pos, next, &nn->client_lru) {
5407 		clp = list_entry(pos, struct nfs4_client, cl_lru);
5408 		if (clp->cl_time > cutoff) {
5409 			t = clp->cl_time - cutoff;
5410 			new_timeo = min(new_timeo, t);
5411 			break;
5412 		}
5413 		if (mark_client_expired_locked(clp)) {
5414 			trace_nfsd_clid_expired(&clp->cl_clientid);
5415 			continue;
5416 		}
5417 		list_add(&clp->cl_lru, &reaplist);
5418 	}
5419 	spin_unlock(&nn->client_lock);
5420 	list_for_each_safe(pos, next, &reaplist) {
5421 		clp = list_entry(pos, struct nfs4_client, cl_lru);
5422 		trace_nfsd_clid_purged(&clp->cl_clientid);
5423 		list_del_init(&clp->cl_lru);
5424 		expire_client(clp);
5425 	}
5426 	spin_lock(&state_lock);
5427 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
5428 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5429 		if (dp->dl_time > cutoff) {
5430 			t = dp->dl_time - cutoff;
5431 			new_timeo = min(new_timeo, t);
5432 			break;
5433 		}
5434 		WARN_ON(!unhash_delegation_locked(dp));
5435 		list_add(&dp->dl_recall_lru, &reaplist);
5436 	}
5437 	spin_unlock(&state_lock);
5438 	while (!list_empty(&reaplist)) {
5439 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
5440 					dl_recall_lru);
5441 		list_del_init(&dp->dl_recall_lru);
5442 		revoke_delegation(dp);
5443 	}
5444 
5445 	spin_lock(&nn->client_lock);
5446 	while (!list_empty(&nn->close_lru)) {
5447 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5448 					oo_close_lru);
5449 		if (oo->oo_time > cutoff) {
5450 			t = oo->oo_time - cutoff;
5451 			new_timeo = min(new_timeo, t);
5452 			break;
5453 		}
5454 		list_del_init(&oo->oo_close_lru);
5455 		stp = oo->oo_last_closed_stid;
5456 		oo->oo_last_closed_stid = NULL;
5457 		spin_unlock(&nn->client_lock);
5458 		nfs4_put_stid(&stp->st_stid);
5459 		spin_lock(&nn->client_lock);
5460 	}
5461 	spin_unlock(&nn->client_lock);
5462 
5463 	/*
5464 	 * It's possible for a client to try to acquire an already-held lock
5465 	 * that is being held for a long time, and then lose interest in it.
5466 	 * So, we clean out any un-revisited request after a lease period
5467 	 * under the assumption that the client is no longer interested.
5468 	 *
5469 	 * RFC5661, sec. 9.6 states that the client must not rely on getting
5470 	 * notifications and must continue to poll for locks, even when the
5471 	 * server supports them. Thus this shouldn't lead to clients blocking
5472 	 * indefinitely once the lock does become free.
5473 	 */
5474 	BUG_ON(!list_empty(&reaplist));
5475 	spin_lock(&nn->blocked_locks_lock);
5476 	while (!list_empty(&nn->blocked_locks_lru)) {
5477 		nbl = list_first_entry(&nn->blocked_locks_lru,
5478 					struct nfsd4_blocked_lock, nbl_lru);
5479 		if (nbl->nbl_time > cutoff) {
5480 			t = nbl->nbl_time - cutoff;
5481 			new_timeo = min(new_timeo, t);
5482 			break;
5483 		}
5484 		list_move(&nbl->nbl_lru, &reaplist);
5485 		list_del_init(&nbl->nbl_list);
5486 	}
5487 	spin_unlock(&nn->blocked_locks_lock);
5488 
5489 	while (!list_empty(&reaplist)) {
5490 		nbl = list_first_entry(&reaplist,
5491 					struct nfsd4_blocked_lock, nbl_lru);
5492 		list_del_init(&nbl->nbl_lru);
5493 		free_blocked_lock(nbl);
5494 	}
5495 out:
5496 	new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5497 	return new_timeo;
5498 }
5499 
5500 static struct workqueue_struct *laundry_wq;
5501 static void laundromat_main(struct work_struct *);
5502 
5503 static void
5504 laundromat_main(struct work_struct *laundry)
5505 {
5506 	time64_t t;
5507 	struct delayed_work *dwork = to_delayed_work(laundry);
5508 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5509 					   laundromat_work);
5510 
5511 	t = nfs4_laundromat(nn);
5512 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
5513 }
5514 
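/* Check that the request's filehandle matches the file associated with the stateid. */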
5515 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5516 {
5517 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5518 		return nfserr_bad_stateid;
5519 	return nfs_ok;
5520 }
5521 
5522 static inline int
5523 access_permit_read(struct nfs4_ol_stateid *stp)
5524 {
5525 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
5526 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
5527 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
5528 }
5529 
5530 static inline int
5531 access_permit_write(struct nfs4_ol_stateid *stp)
5532 {
5533 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
5534 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
5535 }
5536 
5537 static
5538 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5539 {
5540 	__be32 status = nfserr_openmode;
5541 
5542 	/* For lock stateids, we test the parent open, not the lock: */
5543 	if (stp->st_openstp)
5544 		stp = stp->st_openstp;
5545 	if ((flags & WR_STATE) && !access_permit_write(stp))
5546 		goto out;
5547 	if ((flags & RD_STATE) && !access_permit_read(stp))
5548 		goto out;
5549 	status = nfs_ok;
5550 out:
5551 	return status;
5552 }
5553 
5554 static inline __be32
5555 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5556 {
5557 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
5558 		return nfs_ok;
5559 	else if (opens_in_grace(net)) {
5560 		/* The answer in the remaining cases depends on the existence
5561 		 * of conflicting state, so we must wait out the grace period. */
5562 		return nfserr_grace;
5563 	} else if (flags & WR_STATE)
5564 		return nfs4_share_conflict(current_fh,
5565 				NFS4_SHARE_DENY_WRITE);
5566 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5567 		return nfs4_share_conflict(current_fh,
5568 				NFS4_SHARE_DENY_READ);
5569 }
5570 
5571 /*
5572  * Allow READ/WRITE during grace period on recovered state only for files
5573  * that are not able to provide mandatory locking.
5574  */
5575 static inline int
5576 grace_disallows_io(struct net *net, struct inode *inode)
5577 {
5578 	return opens_in_grace(net) && mandatory_lock(inode);
5579 }
5580 
5581 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5582 {
5583 	/*
5584 	 * When sessions are used the stateid generation number is ignored
5585 	 * when it is zero.
5586 	 */
5587 	if (has_session && in->si_generation == 0)
5588 		return nfs_ok;
5589 
5590 	if (in->si_generation == ref->si_generation)
5591 		return nfs_ok;
5592 
5593 	/* If the client sends us a stateid from the future, it's buggy: */
5594 	if (nfsd4_stateid_generation_after(in, ref))
5595 		return nfserr_bad_stateid;
5596 	/*
5597 	 * However, we could see a stateid from the past, even from a
5598 	 * non-buggy client.  For example, if the client sends a lock
5599 	 * while some IO is outstanding, the lock may bump si_generation
5600 	 * while the IO is still in flight.  The client could avoid that
5601 	 * situation by waiting for responses on all the IO requests,
5602 	 * but if requests are rarely reordered in flight, better
5603 	 * performance may result from simply retrying IO that receives
5604 	 * an old_stateid error:
5605 	 */
5606 	return nfserr_old_stateid;
5607 }
5608 
5609 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5610 {
5611 	__be32 ret;
5612 
5613 	spin_lock(&s->sc_lock);
5614 	ret = nfsd4_verify_open_stid(s);
5615 	if (ret == nfs_ok)
5616 		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5617 	spin_unlock(&s->sc_lock);
5618 	return ret;
5619 }
5620 
5621 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5622 {
5623 	if (ols->st_stateowner->so_is_open_owner &&
5624 	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5625 		return nfserr_bad_stateid;
5626 	return nfs_ok;
5627 }
5628 
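/*
 * Used by TEST_STATEID: look the stateid up under the client lock and map
 * its type and generation to the status that should be reported for it.
 */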
5629 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5630 {
5631 	struct nfs4_stid *s;
5632 	__be32 status = nfserr_bad_stateid;
5633 
5634 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5635 		CLOSE_STATEID(stateid))
5636 		return status;
5637 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
5638 		return status;
5639 	spin_lock(&cl->cl_lock);
5640 	s = find_stateid_locked(cl, stateid);
5641 	if (!s)
5642 		goto out_unlock;
5643 	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5644 	if (status)
5645 		goto out_unlock;
5646 	switch (s->sc_type) {
5647 	case NFS4_DELEG_STID:
5648 		status = nfs_ok;
5649 		break;
5650 	case NFS4_REVOKED_DELEG_STID:
5651 		status = nfserr_deleg_revoked;
5652 		break;
5653 	case NFS4_OPEN_STID:
5654 	case NFS4_LOCK_STID:
5655 		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5656 		break;
5657 	default:
5658 		printk("unknown stateid type %x\n", s->sc_type);
5659 		fallthrough;
5660 	case NFS4_CLOSED_STID:
5661 	case NFS4_CLOSED_DELEG_STID:
5662 		status = nfserr_bad_stateid;
5663 	}
5664 out_unlock:
5665 	spin_unlock(&cl->cl_lock);
5666 	return status;
5667 }
5668 
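/*
 * Look up a stateid of one of the types in @typemask for the client named
 * in the stateid.  On success a reference to the stid is returned in @s;
 * the caller must drop it with nfs4_put_stid().
 */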
5669 __be32
5670 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5671 		     stateid_t *stateid, unsigned char typemask,
5672 		     struct nfs4_stid **s, struct nfsd_net *nn)
5673 {
5674 	__be32 status;
5675 	bool return_revoked = false;
5676 
5677 	/*
5678 	 * Only return revoked delegations if explicitly asked;
5679 	 * otherwise report nfserr_deleg_revoked or nfserr_bad_stateid.
5680 	 */
5681 	if (typemask & NFS4_REVOKED_DELEG_STID)
5682 		return_revoked = true;
5683 	else if (typemask & NFS4_DELEG_STID)
5684 		typemask |= NFS4_REVOKED_DELEG_STID;
5685 
5686 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5687 		CLOSE_STATEID(stateid))
5688 		return nfserr_bad_stateid;
5689 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
5690 				 false);
5691 	if (status == nfserr_stale_clientid) {
5692 		if (cstate->session)
5693 			return nfserr_bad_stateid;
5694 		return nfserr_stale_stateid;
5695 	}
5696 	if (status)
5697 		return status;
5698 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
5699 	if (!*s)
5700 		return nfserr_bad_stateid;
5701 	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5702 		nfs4_put_stid(*s);
5703 		if (cstate->minorversion)
5704 			return nfserr_deleg_revoked;
5705 		return nfserr_bad_stateid;
5706 	}
5707 	return nfs_ok;
5708 }
5709 
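/*
 * Return a referenced nfsd_file for the stateid: the delegation file for
 * delegation stateids, or a readable/writeable open file for open and lock
 * stateids depending on @flags.  Returns NULL if none is available.
 */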
5710 static struct nfsd_file *
5711 nfs4_find_file(struct nfs4_stid *s, int flags)
5712 {
5713 	if (!s)
5714 		return NULL;
5715 
5716 	switch (s->sc_type) {
5717 	case NFS4_DELEG_STID:
5718 		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5719 			return NULL;
5720 		return nfsd_file_get(s->sc_file->fi_deleg_file);
5721 	case NFS4_OPEN_STID:
5722 	case NFS4_LOCK_STID:
5723 		if (flags & RD_STATE)
5724 			return find_readable_file(s->sc_file);
5725 		else
5726 			return find_writeable_file(s->sc_file);
5727 	}
5728 
5729 	return NULL;
5730 }
5731 
5732 static __be32
5733 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5734 {
5735 	__be32 status;
5736 
5737 	status = nfsd4_check_openowner_confirmed(ols);
5738 	if (status)
5739 		return status;
5740 	return nfs4_check_openmode(ols, flags);
5741 }
5742 
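/*
 * Produce the nfsd_file to use for this request: prefer the file already
 * associated with the stateid (subject to a permission check with owner
 * override), otherwise acquire one from the file cache by filehandle.
 */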
5743 static __be32
5744 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5745 		struct nfsd_file **nfp, int flags)
5746 {
5747 	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5748 	struct nfsd_file *nf;
5749 	__be32 status;
5750 
5751 	nf = nfs4_find_file(s, flags);
5752 	if (nf) {
5753 		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5754 				acc | NFSD_MAY_OWNER_OVERRIDE);
5755 		if (status) {
5756 			nfsd_file_put(nf);
5757 			goto out;
5758 		}
5759 	} else {
5760 		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5761 		if (status)
5762 			return status;
5763 	}
5764 	*nfp = nf;
5765 out:
5766 	return status;
5767 }
5768 static void
5769 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5770 {
5771 	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
5772 	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
5773 		return;
5774 	list_del(&cps->cp_list);
5775 	idr_remove(&nn->s2s_cp_stateids,
5776 		   cps->cp_stateid.stid.si_opaque.so_id);
5777 	kfree(cps);
5778 }
5779 /*
5780  * A READ from an inter-server-to-server COPY will have a
5781  * copy stateid. Look up the copy notify stateid from the
5782  * idr structure and take a reference on it.
5783  */
5784 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5785 			  struct nfs4_client *clp,
5786 			  struct nfs4_cpntf_state **cps)
5787 {
5788 	copy_stateid_t *cps_t;
5789 	struct nfs4_cpntf_state *state = NULL;
5790 
5791 	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
5792 		return nfserr_bad_stateid;
5793 	spin_lock(&nn->s2s_cp_lock);
5794 	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
5795 	if (cps_t) {
5796 		state = container_of(cps_t, struct nfs4_cpntf_state,
5797 				     cp_stateid);
5798 		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
5799 			state = NULL;
5800 			goto unlock;
5801 		}
5802 		if (!clp)
5803 			refcount_inc(&state->cp_stateid.sc_count);
5804 		else
5805 			_free_cpntf_state_locked(nn, state);
5806 	}
5807 unlock:
5808 	spin_unlock(&nn->s2s_cp_lock);
5809 	if (!state)
5810 		return nfserr_bad_stateid;
5811 	if (!clp && state)
5812 		*cps = state;
5813 	return 0;
5814 }
5815 
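/*
 * Resolve a copy-notify stateid to its parent stateid: look the copy-notify
 * state up, refresh its access time, then look up the saved parent clientid
 * and stateid and return the referenced parent stid in @stid.
 */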
5816 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5817 			       struct nfs4_stid **stid)
5818 {
5819 	__be32 status;
5820 	struct nfs4_cpntf_state *cps = NULL;
5821 	struct nfsd4_compound_state cstate;
5822 
5823 	status = manage_cpntf_state(nn, st, NULL, &cps);
5824 	if (status)
5825 		return status;
5826 
5827 	cps->cpntf_time = ktime_get_boottime_seconds();
5828 	memset(&cstate, 0, sizeof(cstate));
5829 	status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
5830 	if (status)
5831 		goto out;
5832 	status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
5833 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5834 				stid, nn);
5835 	put_client_renew(cstate.clp);
5836 out:
5837 	nfs4_put_cpntf_state(nn, cps);
5838 	return status;
5839 }
5840 
5841 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5842 {
5843 	spin_lock(&nn->s2s_cp_lock);
5844 	_free_cpntf_state_locked(nn, cps);
5845 	spin_unlock(&nn->s2s_cp_lock);
5846 }
5847 
5848 /*
5849  * Checks for stateid operations
5850  */
5851 __be32
5852 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5853 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5854 		stateid_t *stateid, int flags, struct nfsd_file **nfp,
5855 		struct nfs4_stid **cstid)
5856 {
5857 	struct inode *ino = d_inode(fhp->fh_dentry);
5858 	struct net *net = SVC_NET(rqstp);
5859 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5860 	struct nfs4_stid *s = NULL;
5861 	__be32 status;
5862 
5863 	if (nfp)
5864 		*nfp = NULL;
5865 
5866 	if (grace_disallows_io(net, ino))
5867 		return nfserr_grace;
5868 
5869 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5870 		status = check_special_stateids(net, fhp, stateid, flags);
5871 		goto done;
5872 	}
5873 
5874 	status = nfsd4_lookup_stateid(cstate, stateid,
5875 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5876 				&s, nn);
5877 	if (status == nfserr_bad_stateid)
5878 		status = find_cpntf_state(nn, stateid, &s);
5879 	if (status)
5880 		return status;
5881 	status = nfsd4_stid_check_stateid_generation(stateid, s,
5882 			nfsd4_has_session(cstate));
5883 	if (status)
5884 		goto out;
5885 
5886 	switch (s->sc_type) {
5887 	case NFS4_DELEG_STID:
5888 		status = nfs4_check_delegmode(delegstateid(s), flags);
5889 		break;
5890 	case NFS4_OPEN_STID:
5891 	case NFS4_LOCK_STID:
5892 		status = nfs4_check_olstateid(openlockstateid(s), flags);
5893 		break;
5894 	default:
5895 		status = nfserr_bad_stateid;
5896 		break;
5897 	}
5898 	if (status)
5899 		goto out;
5900 	status = nfs4_check_fh(fhp, s);
5901 
5902 done:
5903 	if (status == nfs_ok && nfp)
5904 		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
5905 out:
5906 	if (s) {
5907 		if (!status && cstid)
5908 			*cstid = s;
5909 		else
5910 			nfs4_put_stid(s);
5911 	}
5912 	return status;
5913 }
5914 
5915 /*
5916  * Test if the stateid is valid
5917  */
5918 __be32
5919 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5920 		   union nfsd4_op_u *u)
5921 {
5922 	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5923 	struct nfsd4_test_stateid_id *stateid;
5924 	struct nfs4_client *cl = cstate->session->se_client;
5925 
5926 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5927 		stateid->ts_id_status =
5928 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5929 
5930 	return nfs_ok;
5931 }
5932 
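/*
 * FREE_STATEID helper for lock stateids: fail with nfserr_locks_held if the
 * lockowner still holds locks on the file, otherwise release the stateid.
 * Consumes the caller's reference to @s.
 */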
5933 static __be32
5934 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5935 {
5936 	struct nfs4_ol_stateid *stp = openlockstateid(s);
5937 	__be32 ret;
5938 
5939 	ret = nfsd4_lock_ol_stateid(stp);
5940 	if (ret)
5941 		goto out_put_stid;
5942 
5943 	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5944 	if (ret)
5945 		goto out;
5946 
5947 	ret = nfserr_locks_held;
5948 	if (check_for_locks(stp->st_stid.sc_file,
5949 			    lockowner(stp->st_stateowner)))
5950 		goto out;
5951 
5952 	release_lock_stateid(stp);
5953 	ret = nfs_ok;
5954 
5955 out:
5956 	mutex_unlock(&stp->st_mutex);
5957 out_put_stid:
5958 	nfs4_put_stid(s);
5959 	return ret;
5960 }
5961 
5962 __be32
5963 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5964 		   union nfsd4_op_u *u)
5965 {
5966 	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5967 	stateid_t *stateid = &free_stateid->fr_stateid;
5968 	struct nfs4_stid *s;
5969 	struct nfs4_delegation *dp;
5970 	struct nfs4_client *cl = cstate->session->se_client;
5971 	__be32 ret = nfserr_bad_stateid;
5972 
5973 	spin_lock(&cl->cl_lock);
5974 	s = find_stateid_locked(cl, stateid);
5975 	if (!s)
5976 		goto out_unlock;
5977 	spin_lock(&s->sc_lock);
5978 	switch (s->sc_type) {
5979 	case NFS4_DELEG_STID:
5980 		ret = nfserr_locks_held;
5981 		break;
5982 	case NFS4_OPEN_STID:
5983 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5984 		if (ret)
5985 			break;
5986 		ret = nfserr_locks_held;
5987 		break;
5988 	case NFS4_LOCK_STID:
5989 		spin_unlock(&s->sc_lock);
5990 		refcount_inc(&s->sc_count);
5991 		spin_unlock(&cl->cl_lock);
5992 		ret = nfsd4_free_lock_stateid(stateid, s);
5993 		goto out;
5994 	case NFS4_REVOKED_DELEG_STID:
5995 		spin_unlock(&s->sc_lock);
5996 		dp = delegstateid(s);
5997 		list_del_init(&dp->dl_recall_lru);
5998 		spin_unlock(&cl->cl_lock);
5999 		nfs4_put_stid(s);
6000 		ret = nfs_ok;
6001 		goto out;
6002 	/* Default falls through and returns nfserr_bad_stateid */
6003 	}
6004 	spin_unlock(&s->sc_lock);
6005 out_unlock:
6006 	spin_unlock(&cl->cl_lock);
6007 out:
6008 	return ret;
6009 }
6010 
6011 static inline int
6012 setlkflg(int type)
6013 {
6014 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6015 		RD_STATE : WR_STATE;
6016 }
6017 
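/*
 * Common checks for a seqid-mutating operation: verify the stateowner's
 * seqid, lock the stateid's st_mutex, then check the stateid generation and
 * the filehandle.  st_mutex is held on return only when nfs_ok is returned.
 */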
6018 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6019 {
6020 	struct svc_fh *current_fh = &cstate->current_fh;
6021 	struct nfs4_stateowner *sop = stp->st_stateowner;
6022 	__be32 status;
6023 
6024 	status = nfsd4_check_seqid(cstate, sop, seqid);
6025 	if (status)
6026 		return status;
6027 	status = nfsd4_lock_ol_stateid(stp);
6028 	if (status != nfs_ok)
6029 		return status;
6030 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6031 	if (status == nfs_ok)
6032 		status = nfs4_check_fh(current_fh, &stp->st_stid);
6033 	if (status != nfs_ok)
6034 		mutex_unlock(&stp->st_mutex);
6035 	return status;
6036 }
6037 
6038 /*
6039  * Checks for sequence id mutating operations.
6040  */
6041 static __be32
6042 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6043 			 stateid_t *stateid, char typemask,
6044 			 struct nfs4_ol_stateid **stpp,
6045 			 struct nfsd_net *nn)
6046 {
6047 	__be32 status;
6048 	struct nfs4_stid *s;
6049 	struct nfs4_ol_stateid *stp = NULL;
6050 
6051 	trace_nfsd_preprocess(seqid, stateid);
6052 
6053 	*stpp = NULL;
6054 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6055 	if (status)
6056 		return status;
6057 	stp = openlockstateid(s);
6058 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6059 
6060 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6061 	if (!status)
6062 		*stpp = stp;
6063 	else
6064 		nfs4_put_stid(&stp->st_stid);
6065 	return status;
6066 }
6067 
6068 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6069 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6070 {
6071 	__be32 status;
6072 	struct nfs4_openowner *oo;
6073 	struct nfs4_ol_stateid *stp;
6074 
6075 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6076 						NFS4_OPEN_STID, &stp, nn);
6077 	if (status)
6078 		return status;
6079 	oo = openowner(stp->st_stateowner);
6080 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6081 		mutex_unlock(&stp->st_mutex);
6082 		nfs4_put_stid(&stp->st_stid);
6083 		return nfserr_bad_stateid;
6084 	}
6085 	*stpp = stp;
6086 	return nfs_ok;
6087 }
6088 
6089 __be32
6090 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6091 		   union nfsd4_op_u *u)
6092 {
6093 	struct nfsd4_open_confirm *oc = &u->open_confirm;
6094 	__be32 status;
6095 	struct nfs4_openowner *oo;
6096 	struct nfs4_ol_stateid *stp;
6097 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6098 
6099 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6100 			cstate->current_fh.fh_dentry);
6101 
6102 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6103 	if (status)
6104 		return status;
6105 
6106 	status = nfs4_preprocess_seqid_op(cstate,
6107 					oc->oc_seqid, &oc->oc_req_stateid,
6108 					NFS4_OPEN_STID, &stp, nn);
6109 	if (status)
6110 		goto out;
6111 	oo = openowner(stp->st_stateowner);
6112 	status = nfserr_bad_stateid;
6113 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6114 		mutex_unlock(&stp->st_mutex);
6115 		goto put_stateid;
6116 	}
6117 	oo->oo_flags |= NFS4_OO_CONFIRMED;
6118 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6119 	mutex_unlock(&stp->st_mutex);
6120 	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6121 	nfsd4_client_record_create(oo->oo_owner.so_client);
6122 	status = nfs_ok;
6123 put_stateid:
6124 	nfs4_put_stid(&stp->st_stid);
6125 out:
6126 	nfsd4_bump_seqid(cstate, status);
6127 	return status;
6128 }
6129 
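/*
 * OPEN_DOWNGRADE helpers: drop any file access taken for share-access bits
 * that the new, downgraded access mode no longer implies.
 */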
6130 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6131 {
6132 	if (!test_access(access, stp))
6133 		return;
6134 	nfs4_file_put_access(stp->st_stid.sc_file, access);
6135 	clear_access(access, stp);
6136 }
6137 
6138 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6139 {
6140 	switch (to_access) {
6141 	case NFS4_SHARE_ACCESS_READ:
6142 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6143 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6144 		break;
6145 	case NFS4_SHARE_ACCESS_WRITE:
6146 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6147 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6148 		break;
6149 	case NFS4_SHARE_ACCESS_BOTH:
6150 		break;
6151 	default:
6152 		WARN_ON_ONCE(1);
6153 	}
6154 }
6155 
6156 __be32
6157 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6158 		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6159 {
6160 	struct nfsd4_open_downgrade *od = &u->open_downgrade;
6161 	__be32 status;
6162 	struct nfs4_ol_stateid *stp;
6163 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6164 
6165 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6166 			cstate->current_fh.fh_dentry);
6167 
6168 	/* We don't yet support WANT bits: */
6169 	if (od->od_deleg_want)
6170 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6171 			od->od_deleg_want);
6172 
6173 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6174 					&od->od_stateid, &stp, nn);
6175 	if (status)
6176 		goto out;
6177 	status = nfserr_inval;
6178 	if (!test_access(od->od_share_access, stp)) {
6179 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6180 			stp->st_access_bmap, od->od_share_access);
6181 		goto put_stateid;
6182 	}
6183 	if (!test_deny(od->od_share_deny, stp)) {
6184 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6185 			stp->st_deny_bmap, od->od_share_deny);
6186 		goto put_stateid;
6187 	}
6188 	nfs4_stateid_downgrade(stp, od->od_share_access);
6189 	reset_union_bmap_deny(od->od_share_deny, stp);
6190 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6191 	status = nfs_ok;
6192 put_stateid:
6193 	mutex_unlock(&stp->st_mutex);
6194 	nfs4_put_stid(&stp->st_stid);
6195 out:
6196 	nfsd4_bump_seqid(cstate, status);
6197 	return status;
6198 }
6199 
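/*
 * Unhash an open stateid on CLOSE.  For 4.1+ the stateid can be freed right
 * away; for 4.0 it is moved to the close_lru, keeping the openowner around
 * long enough to handle a possible CLOSE replay.
 */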
6200 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6201 {
6202 	struct nfs4_client *clp = s->st_stid.sc_client;
6203 	bool unhashed;
6204 	LIST_HEAD(reaplist);
6205 
6206 	spin_lock(&clp->cl_lock);
6207 	unhashed = unhash_open_stateid(s, &reaplist);
6208 
6209 	if (clp->cl_minorversion) {
6210 		if (unhashed)
6211 			put_ol_stateid_locked(s, &reaplist);
6212 		spin_unlock(&clp->cl_lock);
6213 		free_ol_stateid_reaplist(&reaplist);
6214 	} else {
6215 		spin_unlock(&clp->cl_lock);
6216 		free_ol_stateid_reaplist(&reaplist);
6217 		if (unhashed)
6218 			move_to_close_lru(s, clp->net);
6219 	}
6220 }
6221 
6222 /*
6223  * nfs4_unlock_state() called after encode
6224  */
6225 __be32
6226 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6227 		union nfsd4_op_u *u)
6228 {
6229 	struct nfsd4_close *close = &u->close;
6230 	__be32 status;
6231 	struct nfs4_ol_stateid *stp;
6232 	struct net *net = SVC_NET(rqstp);
6233 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6234 
6235 	dprintk("NFSD: nfsd4_close on file %pd\n",
6236 			cstate->current_fh.fh_dentry);
6237 
6238 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6239 					&close->cl_stateid,
6240 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
6241 					&stp, nn);
6242 	nfsd4_bump_seqid(cstate, status);
6243 	if (status)
6244 		goto out;
6245 
6246 	stp->st_stid.sc_type = NFS4_CLOSED_STID;
6247 
6248 	/*
6249 	 * Technically we don't _really_ have to increment or copy it, since
6250 	 * it should just be gone after this operation and we clobber the
6251 	 * copied value below, but we continue to do so here just to ensure
6252 	 * that racing ops see that there was a state change.
6253 	 */
6254 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6255 
6256 	nfsd4_close_open_stateid(stp);
6257 	mutex_unlock(&stp->st_mutex);
6258 
6259 	/* v4.1+ suggests that we send a special stateid in here, since the
6260 	 * clients should just ignore this anyway. Since this is not useful
6261 	 * for v4.0 clients either, we set it to the special close_stateid
6262 	 * universally.
6263 	 *
6264 	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6265 	 */
6266 	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6267 
6268 	/* put reference from nfs4_preprocess_seqid_op */
6269 	nfs4_put_stid(&stp->st_stid);
6270 out:
6271 	return status;
6272 }
6273 
6274 __be32
6275 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6276 		  union nfsd4_op_u *u)
6277 {
6278 	struct nfsd4_delegreturn *dr = &u->delegreturn;
6279 	struct nfs4_delegation *dp;
6280 	stateid_t *stateid = &dr->dr_stateid;
6281 	struct nfs4_stid *s;
6282 	__be32 status;
6283 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6284 
6285 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6286 		return status;
6287 
6288 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6289 	if (status)
6290 		goto out;
6291 	dp = delegstateid(s);
6292 	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6293 	if (status)
6294 		goto put_stateid;
6295 
6296 	destroy_delegation(dp);
6297 put_stateid:
6298 	nfs4_put_stid(&dp->dl_stid);
6299 out:
6300 	return status;
6301 }
6302 
6303 static inline u64
6304 end_offset(u64 start, u64 len)
6305 {
6306 	u64 end;
6307 
6308 	end = start + len;
6309 	return end >= start ? end: NFS4_MAX_UINT64;
6310 }
6311 
6312 /* last octet in a range */
6313 static inline u64
6314 last_byte_offset(u64 start, u64 len)
6315 {
6316 	u64 end;
6317 
6318 	WARN_ON_ONCE(!len);
6319 	end = start + len;
6320 	return end > start ? end - 1: NFS4_MAX_UINT64;
6321 }
6322 
6323 /*
6324  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6325  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6326  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
6327  * locking, this prevents us from being completely protocol-compliant.  The
6328  * real solution to this problem is to start using unsigned file offsets in
6329  * the VFS, but this is a very deep change!
6330  */
6331 static inline void
6332 nfs4_transform_lock_offset(struct file_lock *lock)
6333 {
6334 	if (lock->fl_start < 0)
6335 		lock->fl_start = OFFSET_MAX;
6336 	if (lock->fl_end < 0)
6337 		lock->fl_end = OFFSET_MAX;
6338 }
6339 
6340 static fl_owner_t
6341 nfsd4_fl_get_owner(fl_owner_t owner)
6342 {
6343 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6344 
6345 	nfs4_get_stateowner(&lo->lo_owner);
6346 	return owner;
6347 }
6348 
6349 static void
6350 nfsd4_fl_put_owner(fl_owner_t owner)
6351 {
6352 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6353 
6354 	if (lo)
6355 		nfs4_put_stateowner(&lo->lo_owner);
6356 }
6357 
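/*
 * lm_notify callback: the lock this nfsd4_blocked_lock was waiting on may
 * now be available.  If nothing else has claimed the entry, dequeue it and
 * run its callback so the client is notified that it can retry the lock.
 */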
6358 static void
6359 nfsd4_lm_notify(struct file_lock *fl)
6360 {
6361 	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
6362 	struct net			*net = lo->lo_owner.so_client->net;
6363 	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
6364 	struct nfsd4_blocked_lock	*nbl = container_of(fl,
6365 						struct nfsd4_blocked_lock, nbl_lock);
6366 	bool queue = false;
6367 
6368 	/* An empty list means that something else is going to be using it */
6369 	spin_lock(&nn->blocked_locks_lock);
6370 	if (!list_empty(&nbl->nbl_list)) {
6371 		list_del_init(&nbl->nbl_list);
6372 		list_del_init(&nbl->nbl_lru);
6373 		queue = true;
6374 	}
6375 	spin_unlock(&nn->blocked_locks_lock);
6376 
6377 	if (queue)
6378 		nfsd4_run_cb(&nbl->nbl_cb);
6379 }
6380 
6381 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
6382 	.lm_notify = nfsd4_lm_notify,
6383 	.lm_get_owner = nfsd4_fl_get_owner,
6384 	.lm_put_owner = nfsd4_fl_put_owner,
6385 };
6386 
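/*
 * Fill the LOCK4denied response from a conflicting file_lock.  If the
 * conflicting lock is held by one of our lockowners, report its owner and
 * clientid; otherwise leave those fields empty.
 */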
6387 static inline void
6388 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6389 {
6390 	struct nfs4_lockowner *lo;
6391 
6392 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6393 		lo = (struct nfs4_lockowner *) fl->fl_owner;
6394 		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6395 						GFP_KERNEL);
6396 		if (!deny->ld_owner.data)
6397 			/* We just don't care that much */
6398 			goto nevermind;
6399 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6400 	} else {
6401 nevermind:
6402 		deny->ld_owner.len = 0;
6403 		deny->ld_owner.data = NULL;
6404 		deny->ld_clientid.cl_boot = 0;
6405 		deny->ld_clientid.cl_id = 0;
6406 	}
6407 	deny->ld_start = fl->fl_start;
6408 	deny->ld_length = NFS4_MAX_UINT64;
6409 	if (fl->fl_end != NFS4_MAX_UINT64)
6410 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
6411 	deny->ld_type = NFS4_READ_LT;
6412 	if (fl->fl_type != F_RDLCK)
6413 		deny->ld_type = NFS4_WRITE_LT;
6414 }
6415 
6416 static struct nfs4_lockowner *
6417 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6418 {
6419 	unsigned int strhashval = ownerstr_hashval(owner);
6420 	struct nfs4_stateowner *so;
6421 
6422 	lockdep_assert_held(&clp->cl_lock);
6423 
6424 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6425 			    so_strhash) {
6426 		if (so->so_is_open_owner)
6427 			continue;
6428 		if (same_owner_str(so, owner))
6429 			return lockowner(nfs4_get_stateowner(so));
6430 	}
6431 	return NULL;
6432 }
6433 
6434 static struct nfs4_lockowner *
6435 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6436 {
6437 	struct nfs4_lockowner *lo;
6438 
6439 	spin_lock(&clp->cl_lock);
6440 	lo = find_lockowner_str_locked(clp, owner);
6441 	spin_unlock(&clp->cl_lock);
6442 	return lo;
6443 }
6444 
6445 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6446 {
6447 	unhash_lockowner_locked(lockowner(sop));
6448 }
6449 
6450 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6451 {
6452 	struct nfs4_lockowner *lo = lockowner(sop);
6453 
6454 	kmem_cache_free(lockowner_slab, lo);
6455 }
6456 
6457 static const struct nfs4_stateowner_operations lockowner_ops = {
6458 	.so_unhash =	nfs4_unhash_lockowner,
6459 	.so_free =	nfs4_free_lockowner,
6460 };
6461 
6462 /*
6463  * Alloc a lock owner structure.
6464  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
6465  * occurred.
6466  *
6467  * strhashval = ownerstr_hashval
6468  */
6469 static struct nfs4_lockowner *
6470 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6471 			   struct nfs4_ol_stateid *open_stp,
6472 			   struct nfsd4_lock *lock)
6473 {
6474 	struct nfs4_lockowner *lo, *ret;
6475 
6476 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6477 	if (!lo)
6478 		return NULL;
6479 	INIT_LIST_HEAD(&lo->lo_blocked);
6480 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6481 	lo->lo_owner.so_is_open_owner = 0;
6482 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6483 	lo->lo_owner.so_ops = &lockowner_ops;
6484 	spin_lock(&clp->cl_lock);
6485 	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6486 	if (ret == NULL) {
6487 		list_add(&lo->lo_owner.so_strhash,
6488 			 &clp->cl_ownerstr_hashtbl[strhashval]);
6489 		ret = lo;
6490 	} else
6491 		nfs4_free_stateowner(&lo->lo_owner);
6492 
6493 	spin_unlock(&clp->cl_lock);
6494 	return ret;
6495 }
6496 
6497 static struct nfs4_ol_stateid *
6498 find_lock_stateid(const struct nfs4_lockowner *lo,
6499 		  const struct nfs4_ol_stateid *ost)
6500 {
6501 	struct nfs4_ol_stateid *lst;
6502 
6503 	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6504 
6505 	/* If ost is not hashed, ost->st_locks will not be valid */
6506 	if (!nfs4_ol_stateid_unhashed(ost))
6507 		list_for_each_entry(lst, &ost->st_locks, st_locks) {
6508 			if (lst->st_stateowner == &lo->lo_owner) {
6509 				refcount_inc(&lst->st_stid.sc_count);
6510 				return lst;
6511 			}
6512 		}
6513 	return NULL;
6514 }
6515 
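/*
 * Hash a newly allocated lock stateid for @lo under the client lock.  If a
 * lock stateid for this lockowner already exists (another thread won the
 * race), return that one instead; returns NULL if the open stateid has
 * already been unhashed.
 */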
6516 static struct nfs4_ol_stateid *
6517 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6518 		  struct nfs4_file *fp, struct inode *inode,
6519 		  struct nfs4_ol_stateid *open_stp)
6520 {
6521 	struct nfs4_client *clp = lo->lo_owner.so_client;
6522 	struct nfs4_ol_stateid *retstp;
6523 
6524 	mutex_init(&stp->st_mutex);
6525 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6526 retry:
6527 	spin_lock(&clp->cl_lock);
6528 	if (nfs4_ol_stateid_unhashed(open_stp))
6529 		goto out_close;
6530 	retstp = find_lock_stateid(lo, open_stp);
6531 	if (retstp)
6532 		goto out_found;
6533 	refcount_inc(&stp->st_stid.sc_count);
6534 	stp->st_stid.sc_type = NFS4_LOCK_STID;
6535 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6536 	get_nfs4_file(fp);
6537 	stp->st_stid.sc_file = fp;
6538 	stp->st_access_bmap = 0;
6539 	stp->st_deny_bmap = open_stp->st_deny_bmap;
6540 	stp->st_openstp = open_stp;
6541 	spin_lock(&fp->fi_lock);
6542 	list_add(&stp->st_locks, &open_stp->st_locks);
6543 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6544 	list_add(&stp->st_perfile, &fp->fi_stateids);
6545 	spin_unlock(&fp->fi_lock);
6546 	spin_unlock(&clp->cl_lock);
6547 	return stp;
6548 out_found:
6549 	spin_unlock(&clp->cl_lock);
6550 	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6551 		nfs4_put_stid(&retstp->st_stid);
6552 		goto retry;
6553 	}
6554 	/* To keep mutex tracking happy */
6555 	mutex_unlock(&stp->st_mutex);
6556 	return retstp;
6557 out_close:
6558 	spin_unlock(&clp->cl_lock);
6559 	mutex_unlock(&stp->st_mutex);
6560 	return NULL;
6561 }
6562 
6563 static struct nfs4_ol_stateid *
6564 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6565 			    struct inode *inode, struct nfs4_ol_stateid *ost,
6566 			    bool *new)
6567 {
6568 	struct nfs4_stid *ns = NULL;
6569 	struct nfs4_ol_stateid *lst;
6570 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6571 	struct nfs4_client *clp = oo->oo_owner.so_client;
6572 
6573 	*new = false;
6574 	spin_lock(&clp->cl_lock);
6575 	lst = find_lock_stateid(lo, ost);
6576 	spin_unlock(&clp->cl_lock);
6577 	if (lst != NULL) {
6578 		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6579 			goto out;
6580 		nfs4_put_stid(&lst->st_stid);
6581 	}
6582 	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6583 	if (ns == NULL)
6584 		return NULL;
6585 
6586 	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6587 	if (lst == openlockstateid(ns))
6588 		*new = true;
6589 	else
6590 		nfs4_put_stid(ns);
6591 out:
6592 	return lst;
6593 }
6594 
6595 static int
6596 check_lock_length(u64 offset, u64 length)
6597 {
6598 	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6599 		(length > ~offset)));
6600 }
6601 
6602 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6603 {
6604 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6605 
6606 	lockdep_assert_held(&fp->fi_lock);
6607 
6608 	if (test_access(access, lock_stp))
6609 		return;
6610 	__nfs4_file_get_access(fp, access);
6611 	set_access(access, lock_stp);
6612 }
6613 
6614 static __be32
6615 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6616 			    struct nfs4_ol_stateid *ost,
6617 			    struct nfsd4_lock *lock,
6618 			    struct nfs4_ol_stateid **plst, bool *new)
6619 {
6620 	__be32 status;
6621 	struct nfs4_file *fi = ost->st_stid.sc_file;
6622 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6623 	struct nfs4_client *cl = oo->oo_owner.so_client;
6624 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6625 	struct nfs4_lockowner *lo;
6626 	struct nfs4_ol_stateid *lst;
6627 	unsigned int strhashval;
6628 
6629 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
6630 	if (!lo) {
6631 		strhashval = ownerstr_hashval(&lock->lk_new_owner);
6632 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6633 		if (lo == NULL)
6634 			return nfserr_jukebox;
6635 	} else {
6636 		/* with an existing lockowner, seqids must be the same */
6637 		status = nfserr_bad_seqid;
6638 		if (!cstate->minorversion &&
6639 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6640 			goto out;
6641 	}
6642 
6643 	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6644 	if (lst == NULL) {
6645 		status = nfserr_jukebox;
6646 		goto out;
6647 	}
6648 
6649 	status = nfs_ok;
6650 	*plst = lst;
6651 out:
6652 	nfs4_put_stateowner(&lo->lo_owner);
6653 	return status;
6654 }
6655 
6656 /*
6657  *  LOCK operation
6658  */
6659 __be32
6660 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6661 	   union nfsd4_op_u *u)
6662 {
6663 	struct nfsd4_lock *lock = &u->lock;
6664 	struct nfs4_openowner *open_sop = NULL;
6665 	struct nfs4_lockowner *lock_sop = NULL;
6666 	struct nfs4_ol_stateid *lock_stp = NULL;
6667 	struct nfs4_ol_stateid *open_stp = NULL;
6668 	struct nfs4_file *fp;
6669 	struct nfsd_file *nf = NULL;
6670 	struct nfsd4_blocked_lock *nbl = NULL;
6671 	struct file_lock *file_lock = NULL;
6672 	struct file_lock *conflock = NULL;
6673 	__be32 status = 0;
6674 	int lkflg;
6675 	int err;
6676 	bool new = false;
6677 	unsigned char fl_type;
6678 	unsigned int fl_flags = FL_POSIX;
6679 	struct net *net = SVC_NET(rqstp);
6680 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6681 
6682 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6683 		(long long) lock->lk_offset,
6684 		(long long) lock->lk_length);
6685 
6686 	if (check_lock_length(lock->lk_offset, lock->lk_length))
6687 		 return nfserr_inval;
6688 
6689 	if ((status = fh_verify(rqstp, &cstate->current_fh,
6690 				S_IFREG, NFSD_MAY_LOCK))) {
6691 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
6692 		return status;
6693 	}
6694 
6695 	if (lock->lk_is_new) {
6696 		if (nfsd4_has_session(cstate))
6697 			/* See rfc 5661 18.10.3: given clientid is ignored: */
6698 			memcpy(&lock->lk_new_clientid,
6699 				&cstate->session->se_client->cl_clientid,
6700 				sizeof(clientid_t));
6701 
6702 		status = nfserr_stale_clientid;
6703 		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
6704 			goto out;
6705 
6706 		/* validate and update open stateid and open seqid */
6707 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
6708 				        lock->lk_new_open_seqid,
6709 		                        &lock->lk_new_open_stateid,
6710 					&open_stp, nn);
6711 		if (status)
6712 			goto out;
6713 		mutex_unlock(&open_stp->st_mutex);
6714 		open_sop = openowner(open_stp->st_stateowner);
6715 		status = nfserr_bad_stateid;
6716 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6717 						&lock->lk_new_clientid))
6718 			goto out;
6719 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
6720 							&lock_stp, &new);
6721 	} else {
6722 		status = nfs4_preprocess_seqid_op(cstate,
6723 				       lock->lk_old_lock_seqid,
6724 				       &lock->lk_old_lock_stateid,
6725 				       NFS4_LOCK_STID, &lock_stp, nn);
6726 	}
6727 	if (status)
6728 		goto out;
6729 	lock_sop = lockowner(lock_stp->st_stateowner);
6730 
6731 	lkflg = setlkflg(lock->lk_type);
6732 	status = nfs4_check_openmode(lock_stp, lkflg);
6733 	if (status)
6734 		goto out;
6735 
6736 	status = nfserr_grace;
6737 	if (locks_in_grace(net) && !lock->lk_reclaim)
6738 		goto out;
6739 	status = nfserr_no_grace;
6740 	if (!locks_in_grace(net) && lock->lk_reclaim)
6741 		goto out;
6742 
6743 	fp = lock_stp->st_stid.sc_file;
6744 	switch (lock->lk_type) {
6745 		case NFS4_READW_LT:
6746 			if (nfsd4_has_session(cstate))
6747 				fl_flags |= FL_SLEEP;
6748 			fallthrough;
6749 		case NFS4_READ_LT:
6750 			spin_lock(&fp->fi_lock);
6751 			nf = find_readable_file_locked(fp);
6752 			if (nf)
6753 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6754 			spin_unlock(&fp->fi_lock);
6755 			fl_type = F_RDLCK;
6756 			break;
6757 		case NFS4_WRITEW_LT:
6758 			if (nfsd4_has_session(cstate))
6759 				fl_flags |= FL_SLEEP;
6760 			fallthrough;
6761 		case NFS4_WRITE_LT:
6762 			spin_lock(&fp->fi_lock);
6763 			nf = find_writeable_file_locked(fp);
6764 			if (nf)
6765 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6766 			spin_unlock(&fp->fi_lock);
6767 			fl_type = F_WRLCK;
6768 			break;
6769 		default:
6770 			status = nfserr_inval;
6771 		goto out;
6772 	}
6773 
6774 	if (!nf) {
6775 		status = nfserr_openmode;
6776 		goto out;
6777 	}
6778 
6779 	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6780 	if (!nbl) {
6781 		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6782 		status = nfserr_jukebox;
6783 		goto out;
6784 	}
6785 
6786 	file_lock = &nbl->nbl_lock;
6787 	file_lock->fl_type = fl_type;
6788 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6789 	file_lock->fl_pid = current->tgid;
6790 	file_lock->fl_file = nf->nf_file;
6791 	file_lock->fl_flags = fl_flags;
6792 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6793 	file_lock->fl_start = lock->lk_offset;
6794 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6795 	nfs4_transform_lock_offset(file_lock);
6796 
6797 	conflock = locks_alloc_lock();
6798 	if (!conflock) {
6799 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6800 		status = nfserr_jukebox;
6801 		goto out;
6802 	}
6803 
6804 	if (fl_flags & FL_SLEEP) {
6805 		nbl->nbl_time = ktime_get_boottime_seconds();
6806 		spin_lock(&nn->blocked_locks_lock);
6807 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6808 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6809 		spin_unlock(&nn->blocked_locks_lock);
6810 	}
6811 
6812 	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6813 	switch (err) {
6814 	case 0: /* success! */
6815 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6816 		status = 0;
6817 		if (lock->lk_reclaim)
6818 			nn->somebody_reclaimed = true;
6819 		break;
6820 	case FILE_LOCK_DEFERRED:
6821 		nbl = NULL;
6822 		fallthrough;
6823 	case -EAGAIN:		/* conflock holds conflicting lock */
6824 		status = nfserr_denied;
6825 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6826 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
6827 		break;
6828 	case -EDEADLK:
6829 		status = nfserr_deadlock;
6830 		break;
6831 	default:
6832 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6833 		status = nfserrno(err);
6834 		break;
6835 	}
6836 out:
6837 	if (nbl) {
6838 		/* dequeue it if we queued it before */
6839 		if (fl_flags & FL_SLEEP) {
6840 			spin_lock(&nn->blocked_locks_lock);
6841 			list_del_init(&nbl->nbl_list);
6842 			list_del_init(&nbl->nbl_lru);
6843 			spin_unlock(&nn->blocked_locks_lock);
6844 		}
6845 		free_blocked_lock(nbl);
6846 	}
6847 	if (nf)
6848 		nfsd_file_put(nf);
6849 	if (lock_stp) {
6850 		/* Bump seqid manually if the 4.0 replay owner is the openowner */
6851 		if (cstate->replay_owner &&
6852 		    cstate->replay_owner != &lock_sop->lo_owner &&
6853 		    seqid_mutating_err(ntohl(status)))
6854 			lock_sop->lo_owner.so_seqid++;
6855 
6856 		/*
6857 		 * If this is a new, never-before-used stateid, and we are
6858 		 * returning an error, then just go ahead and release it.
6859 		 */
6860 		if (status && new)
6861 			release_lock_stateid(lock_stp);
6862 
6863 		mutex_unlock(&lock_stp->st_mutex);
6864 
6865 		nfs4_put_stid(&lock_stp->st_stid);
6866 	}
6867 	if (open_stp)
6868 		nfs4_put_stid(&open_stp->st_stid);
6869 	nfsd4_bump_seqid(cstate, status);
6870 	if (conflock)
6871 		locks_free_lock(conflock);
6872 	return status;
6873 }
6874 
6875 /*
6876  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6877  * so we do a temporary open here just to get an open file to pass to
6878  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
6879  * inode operation.)
6880  */
6881 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6882 {
6883 	struct nfsd_file *nf;
6884 	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
6885 	if (!err) {
6886 		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
6887 		nfsd_file_put(nf);
6888 	}
6889 	return err;
6890 }
6891 
6892 /*
6893  * LOCKT operation
6894  */
6895 __be32
6896 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6897 	    union nfsd4_op_u *u)
6898 {
6899 	struct nfsd4_lockt *lockt = &u->lockt;
6900 	struct file_lock *file_lock = NULL;
6901 	struct nfs4_lockowner *lo = NULL;
6902 	__be32 status;
6903 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6904 
6905 	if (locks_in_grace(SVC_NET(rqstp)))
6906 		return nfserr_grace;
6907 
6908 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6909 		 return nfserr_inval;
6910 
6911 	if (!nfsd4_has_session(cstate)) {
6912 		status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
6913 					 false);
6914 		if (status)
6915 			goto out;
6916 	}
6917 
6918 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6919 		goto out;
6920 
6921 	file_lock = locks_alloc_lock();
6922 	if (!file_lock) {
6923 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6924 		status = nfserr_jukebox;
6925 		goto out;
6926 	}
6927 
6928 	switch (lockt->lt_type) {
6929 		case NFS4_READ_LT:
6930 		case NFS4_READW_LT:
6931 			file_lock->fl_type = F_RDLCK;
6932 			break;
6933 		case NFS4_WRITE_LT:
6934 		case NFS4_WRITEW_LT:
6935 			file_lock->fl_type = F_WRLCK;
6936 			break;
6937 		default:
6938 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6939 			status = nfserr_inval;
6940 			goto out;
6941 	}
6942 
6943 	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6944 	if (lo)
6945 		file_lock->fl_owner = (fl_owner_t)lo;
6946 	file_lock->fl_pid = current->tgid;
6947 	file_lock->fl_flags = FL_POSIX;
6948 
6949 	file_lock->fl_start = lockt->lt_offset;
6950 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6951 
6952 	nfs4_transform_lock_offset(file_lock);
6953 
6954 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6955 	if (status)
6956 		goto out;
6957 
6958 	if (file_lock->fl_type != F_UNLCK) {
6959 		status = nfserr_denied;
6960 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6961 	}
6962 out:
6963 	if (lo)
6964 		nfs4_put_stateowner(&lo->lo_owner);
6965 	if (file_lock)
6966 		locks_free_lock(file_lock);
6967 	return status;
6968 }
6969 
6970 __be32
6971 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6972 	    union nfsd4_op_u *u)
6973 {
6974 	struct nfsd4_locku *locku = &u->locku;
6975 	struct nfs4_ol_stateid *stp;
6976 	struct nfsd_file *nf = NULL;
6977 	struct file_lock *file_lock = NULL;
6978 	__be32 status;
6979 	int err;
6980 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6981 
6982 	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6983 		(long long) locku->lu_offset,
6984 		(long long) locku->lu_length);
6985 
6986 	if (check_lock_length(locku->lu_offset, locku->lu_length))
6987 		 return nfserr_inval;
6988 
6989 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6990 					&locku->lu_stateid, NFS4_LOCK_STID,
6991 					&stp, nn);
6992 	if (status)
6993 		goto out;
6994 	nf = find_any_file(stp->st_stid.sc_file);
6995 	if (!nf) {
6996 		status = nfserr_lock_range;
6997 		goto put_stateid;
6998 	}
6999 	file_lock = locks_alloc_lock();
7000 	if (!file_lock) {
7001 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7002 		status = nfserr_jukebox;
7003 		goto put_file;
7004 	}
7005 
7006 	file_lock->fl_type = F_UNLCK;
7007 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7008 	file_lock->fl_pid = current->tgid;
7009 	file_lock->fl_file = nf->nf_file;
7010 	file_lock->fl_flags = FL_POSIX;
7011 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
7012 	file_lock->fl_start = locku->lu_offset;
7013 
7014 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
7015 						locku->lu_length);
7016 	nfs4_transform_lock_offset(file_lock);
7017 
7018 	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7019 	if (err) {
7020 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7021 		goto out_nfserr;
7022 	}
7023 	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7024 put_file:
7025 	nfsd_file_put(nf);
7026 put_stateid:
7027 	mutex_unlock(&stp->st_mutex);
7028 	nfs4_put_stid(&stp->st_stid);
7029 out:
7030 	nfsd4_bump_seqid(cstate, status);
7031 	if (file_lock)
7032 		locks_free_lock(file_lock);
7033 	return status;
7034 
7035 out_nfserr:
7036 	status = nfserrno(err);
7037 	goto put_file;
7038 }
7039 
7040 /*
7041  * returns
7042  * 	true:  locks held by lockowner
7043  * 	false: no locks held by lockowner
7044  */
7045 static bool
7046 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7047 {
7048 	struct file_lock *fl;
7049 	int status = false;
7050 	struct nfsd_file *nf = find_any_file(fp);
7051 	struct inode *inode;
7052 	struct file_lock_context *flctx;
7053 
7054 	if (!nf) {
7055 		/* Any valid lock stateid should have some sort of access */
7056 		WARN_ON_ONCE(1);
7057 		return status;
7058 	}
7059 
7060 	inode = locks_inode(nf->nf_file);
7061 	flctx = inode->i_flctx;
7062 
7063 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7064 		spin_lock(&flctx->flc_lock);
7065 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7066 			if (fl->fl_owner == (fl_owner_t)lowner) {
7067 				status = true;
7068 				break;
7069 			}
7070 		}
7071 		spin_unlock(&flctx->flc_lock);
7072 	}
7073 	nfsd_file_put(nf);
7074 	return status;
7075 }
7076 
7077 __be32
7078 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7079 			struct nfsd4_compound_state *cstate,
7080 			union nfsd4_op_u *u)
7081 {
7082 	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7083 	clientid_t *clid = &rlockowner->rl_clientid;
7084 	struct nfs4_stateowner *sop;
7085 	struct nfs4_lockowner *lo = NULL;
7086 	struct nfs4_ol_stateid *stp;
7087 	struct xdr_netobj *owner = &rlockowner->rl_owner;
7088 	unsigned int hashval = ownerstr_hashval(owner);
7089 	__be32 status;
7090 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7091 	struct nfs4_client *clp;
7092 	LIST_HEAD (reaplist);
7093 
7094 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7095 		clid->cl_boot, clid->cl_id);
7096 
7097 	status = lookup_clientid(clid, cstate, nn, false);
7098 	if (status)
7099 		return status;
7100 
7101 	clp = cstate->clp;
7102 	/* Find the matching lock stateowner */
7103 	spin_lock(&clp->cl_lock);
7104 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
7105 			    so_strhash) {
7106 
7107 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
7108 			continue;
7109 
7110 		/* see if there are still any locks associated with it */
7111 		lo = lockowner(sop);
7112 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
7113 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
7114 				status = nfserr_locks_held;
7115 				spin_unlock(&clp->cl_lock);
7116 				return status;
7117 			}
7118 		}
7119 
7120 		nfs4_get_stateowner(sop);
7121 		break;
7122 	}
7123 	if (!lo) {
7124 		spin_unlock(&clp->cl_lock);
7125 		return status;
7126 	}
7127 
7128 	unhash_lockowner_locked(lo);
7129 	while (!list_empty(&lo->lo_owner.so_stateids)) {
7130 		stp = list_first_entry(&lo->lo_owner.so_stateids,
7131 				       struct nfs4_ol_stateid,
7132 				       st_perstateowner);
7133 		WARN_ON(!unhash_lock_stateid(stp));
7134 		put_ol_stateid_locked(stp, &reaplist);
7135 	}
7136 	spin_unlock(&clp->cl_lock);
7137 	free_ol_stateid_reaplist(&reaplist);
7138 	remove_blocked_locks(lo);
7139 	nfs4_put_stateowner(&lo->lo_owner);
7140 
7141 	return status;
7142 }
7143 
7144 static inline struct nfs4_client_reclaim *
7145 alloc_reclaim(void)
7146 {
7147 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7148 }
7149 
7150 bool
7151 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7152 {
7153 	struct nfs4_client_reclaim *crp;
7154 
7155 	crp = nfsd4_find_reclaim_client(name, nn);
7156 	return (crp && crp->cr_clp);
7157 }
7158 
7159 /*
7160  * failure => all reset bets are off, nfserr_no_grace...
7161  *
7162  * The caller is responsible for freeing name.data if NULL is returned (it
7163  * will be freed in nfs4_remove_reclaim_record in the normal case).
7164  */
7165 struct nfs4_client_reclaim *
7166 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7167 		struct nfsd_net *nn)
7168 {
7169 	unsigned int strhashval;
7170 	struct nfs4_client_reclaim *crp;
7171 
7172 	trace_nfsd_clid_reclaim(nn, name.len, name.data);
7173 	crp = alloc_reclaim();
7174 	if (crp) {
7175 		strhashval = clientstr_hashval(name);
7176 		INIT_LIST_HEAD(&crp->cr_strhash);
7177 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7178 		crp->cr_name.data = name.data;
7179 		crp->cr_name.len = name.len;
7180 		crp->cr_princhash.data = princhash.data;
7181 		crp->cr_princhash.len = princhash.len;
7182 		crp->cr_clp = NULL;
7183 		nn->reclaim_str_hashtbl_size++;
7184 	}
7185 	return crp;
7186 }
7187 
7188 void
7189 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7190 {
7191 	list_del(&crp->cr_strhash);
7192 	kfree(crp->cr_name.data);
7193 	kfree(crp->cr_princhash.data);
7194 	kfree(crp);
7195 	nn->reclaim_str_hashtbl_size--;
7196 }
7197 
7198 void
7199 nfs4_release_reclaim(struct nfsd_net *nn)
7200 {
7201 	struct nfs4_client_reclaim *crp = NULL;
7202 	int i;
7203 
7204 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7205 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7206 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7207 			                struct nfs4_client_reclaim, cr_strhash);
7208 			nfs4_remove_reclaim_record(crp, nn);
7209 		}
7210 	}
7211 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7212 }
7213 
7214 /*
7215  * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
7216 struct nfs4_client_reclaim *
7217 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7218 {
7219 	unsigned int strhashval;
7220 	struct nfs4_client_reclaim *crp = NULL;
7221 
7222 	trace_nfsd_clid_find(nn, name.len, name.data);
7223 
7224 	strhashval = clientstr_hashval(name);
7225 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7226 		if (compare_blob(&crp->cr_name, &name) == 0) {
7227 			return crp;
7228 		}
7229 	}
7230 	return NULL;
7231 }
7232 
7233 /*
7234  * Called from OPEN. Look for clientid in reclaim list.
7235  */
7236 __be32
7237 nfs4_check_open_reclaim(clientid_t *clid,
7238 		struct nfsd4_compound_state *cstate,
7239 		struct nfsd_net *nn)
7240 {
7241 	__be32 status;
7242 
7243 	/* find clientid in conf_id_hashtbl */
7244 	status = lookup_clientid(clid, cstate, nn, false);
7245 	if (status)
7246 		return nfserr_reclaim_bad;
7247 
7248 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
7249 		return nfserr_no_grace;
7250 
7251 	if (nfsd4_client_record_check(cstate->clp))
7252 		return nfserr_reclaim_bad;
7253 
7254 	return nfs_ok;
7255 }
7256 
7257 /*
7258  * Since the lifetime of a delegation isn't limited to that of an open, a
7259  * client may quite reasonably hang on to a delegation as long as it has
7260  * the inode cached.  This becomes an obvious problem the first time a
7261  * client's inode cache approaches the size of the server's total memory.
7262  *
7263  * For now we avoid this problem by imposing a hard limit on the number
7264  * of delegations, which varies according to the server's memory size.
7265  */
7266 static void
7267 set_max_delegations(void)
7268 {
7269 	/*
7270 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
7271 	 * estimates suggest that in the worst case (where every delegation
7272 	 * is for a different inode), a delegation could take about 1.5K,
7273 	 * giving a worst case usage of about 6% of memory.
7274 	 */
7275 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7276 }
7277 
7278 static int nfs4_state_create_net(struct net *net)
7279 {
7280 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7281 	int i;
7282 
7283 	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7284 					    sizeof(struct list_head),
7285 					    GFP_KERNEL);
7286 	if (!nn->conf_id_hashtbl)
7287 		goto err;
7288 	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7289 					      sizeof(struct list_head),
7290 					      GFP_KERNEL);
7291 	if (!nn->unconf_id_hashtbl)
7292 		goto err_unconf_id;
7293 	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7294 					      sizeof(struct list_head),
7295 					      GFP_KERNEL);
7296 	if (!nn->sessionid_hashtbl)
7297 		goto err_sessionid;
7298 
7299 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7300 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7301 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7302 	}
7303 	for (i = 0; i < SESSION_HASH_SIZE; i++)
7304 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7305 	nn->conf_name_tree = RB_ROOT;
7306 	nn->unconf_name_tree = RB_ROOT;
7307 	nn->boot_time = ktime_get_real_seconds();
7308 	nn->grace_ended = false;
7309 	nn->nfsd4_manager.block_opens = true;
7310 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7311 	INIT_LIST_HEAD(&nn->client_lru);
7312 	INIT_LIST_HEAD(&nn->close_lru);
7313 	INIT_LIST_HEAD(&nn->del_recall_lru);
7314 	spin_lock_init(&nn->client_lock);
7315 	spin_lock_init(&nn->s2s_cp_lock);
7316 	idr_init(&nn->s2s_cp_stateids);
7317 
7318 	spin_lock_init(&nn->blocked_locks_lock);
7319 	INIT_LIST_HEAD(&nn->blocked_locks_lru);
7320 
7321 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
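	/* Hold a reference on the namespace; dropped in nfs4_state_destroy_net(). */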
7322 	get_net(net);
7323 
7324 	return 0;
7325 
7326 err_sessionid:
7327 	kfree(nn->unconf_id_hashtbl);
7328 err_unconf_id:
7329 	kfree(nn->conf_id_hashtbl);
7330 err:
7331 	return -ENOMEM;
7332 }
7333 
7334 static void
7335 nfs4_state_destroy_net(struct net *net)
7336 {
7337 	int i;
7338 	struct nfs4_client *clp = NULL;
7339 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7340 
7341 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7342 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
7343 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7344 			destroy_client(clp);
7345 		}
7346 	}
7347 
7348 	WARN_ON(!list_empty(&nn->blocked_locks_lru));
7349 
7350 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7351 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7352 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7353 			destroy_client(clp);
7354 		}
7355 	}
7356 
7357 	kfree(nn->sessionid_hashtbl);
7358 	kfree(nn->unconf_id_hashtbl);
7359 	kfree(nn->conf_id_hashtbl);
7360 	put_net(net);
7361 }
7362 
7363 int
7364 nfs4_state_start_net(struct net *net)
7365 {
7366 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7367 	int ret;
7368 
7369 	ret = get_nfsdfs(net);
7370 	if (ret)
7371 		return ret;
7372 	ret = nfs4_state_create_net(net);
7373 	if (ret) {
7374 		mntput(nn->nfsd_mnt);
7375 		return ret;
7376 	}
7377 	locks_start_grace(net, &nn->nfsd4_manager);
7378 	nfsd4_client_tracking_init(net);
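	/*
	 * If reclaim-completion tracking shows there are no clients with
	 * state to reclaim, there is nothing for a grace period to
	 * protect, so skip it and end grace immediately.
	 */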
7379 	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7380 		goto skip_grace;
7381 	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
7382 	       nn->nfsd4_grace, net->ns.inum);
7383 	trace_nfsd_grace_start(nn);
7384 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7385 	return 0;
7386 
7387 skip_grace:
7388 	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7389 			net->ns.inum);
7390 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7391 	nfsd4_end_grace(nn);
7392 	return 0;
7393 }
7394 
7395 /* initialization to perform when the nfsd service is started: */
7396 
7397 int
7398 nfs4_state_start(void)
7399 {
7400 	int ret;
7401 
7402 	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7403 	if (laundry_wq == NULL) {
7404 		ret = -ENOMEM;
7405 		goto out;
7406 	}
7407 	ret = nfsd4_create_callback_queue();
7408 	if (ret)
7409 		goto out_free_laundry;
7410 
7411 	set_max_delegations();
7412 	return 0;
7413 
7414 out_free_laundry:
7415 	destroy_workqueue(laundry_wq);
7416 out:
7417 	return ret;
7418 }
7419 
7420 void
7421 nfs4_state_shutdown_net(struct net *net)
7422 {
7423 	struct nfs4_delegation *dp = NULL;
7424 	struct list_head *pos, *next, reaplist;
7425 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7426 
7427 	cancel_delayed_work_sync(&nn->laundromat_work);
7428 	locks_end_grace(&nn->nfsd4_manager);
7429 
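	/*
	 * Unhash any remaining delegations under state_lock, collecting
	 * them on a local reaplist so the actual destruction can happen
	 * after the lock is dropped.
	 */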
7430 	INIT_LIST_HEAD(&reaplist);
7431 	spin_lock(&state_lock);
7432 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
7433 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7434 		WARN_ON(!unhash_delegation_locked(dp));
7435 		list_add(&dp->dl_recall_lru, &reaplist);
7436 	}
7437 	spin_unlock(&state_lock);
7438 	list_for_each_safe(pos, next, &reaplist) {
7439 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7440 		list_del_init(&dp->dl_recall_lru);
7441 		destroy_unhashed_deleg(dp);
7442 	}
7443 
7444 	nfsd4_client_tracking_exit(net);
7445 	nfs4_state_destroy_net(net);
7446 	mntput(nn->nfsd_mnt);
7447 }
7448 
7449 void
7450 nfs4_state_shutdown(void)
7451 {
7452 	destroy_workqueue(laundry_wq);
7453 	nfsd4_destroy_callback_queue();
7454 }
7455 
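/*
 * If the operation's stateid argument is the special "current stateid"
 * value (seqid 1, all-zero "other" field; NFSv4.1, RFC 5661) and a
 * current stateid has been saved earlier in this compound, substitute
 * the saved stateid.
 */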
7456 static void
7457 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7458 {
7459 	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
7460 	    CURRENT_STATEID(stateid))
7461 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7462 }
7463 
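/*
 * Save the stateid produced by this operation as the compound's current
 * stateid.  Only sessions-based compounds (minorversion != 0) track a
 * current stateid.
 */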
7464 static void
7465 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7466 {
7467 	if (cstate->minorversion) {
7468 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7469 		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7470 	}
7471 }
7472 
7473 void
7474 clear_current_stateid(struct nfsd4_compound_state *cstate)
7475 {
7476 	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7477 }
7478 
7479 /*
7480  * functions to set current state id
7481  */
7482 void
7483 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7484 		union nfsd4_op_u *u)
7485 {
7486 	put_stateid(cstate, &u->open_downgrade.od_stateid);
7487 }
7488 
7489 void
7490 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7491 		union nfsd4_op_u *u)
7492 {
7493 	put_stateid(cstate, &u->open.op_stateid);
7494 }
7495 
7496 void
7497 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7498 		union nfsd4_op_u *u)
7499 {
7500 	put_stateid(cstate, &u->close.cl_stateid);
7501 }
7502 
7503 void
7504 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7505 		union nfsd4_op_u *u)
7506 {
7507 	put_stateid(cstate, &u->lock.lk_resp_stateid);
7508 }
7509 
7510 /*
7511  * functions to consume current state id
7512  */
7513 
7514 void
7515 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7516 		union nfsd4_op_u *u)
7517 {
7518 	get_stateid(cstate, &u->open_downgrade.od_stateid);
7519 }
7520 
7521 void
7522 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7523 		union nfsd4_op_u *u)
7524 {
7525 	get_stateid(cstate, &u->delegreturn.dr_stateid);
7526 }
7527 
7528 void
7529 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7530 		union nfsd4_op_u *u)
7531 {
7532 	get_stateid(cstate, &u->free_stateid.fr_stateid);
7533 }
7534 
7535 void
7536 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7537 		union nfsd4_op_u *u)
7538 {
7539 	get_stateid(cstate, &u->setattr.sa_stateid);
7540 }
7541 
7542 void
7543 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7544 		union nfsd4_op_u *u)
7545 {
7546 	get_stateid(cstate, &u->close.cl_stateid);
7547 }
7548 
7549 void
7550 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7551 		union nfsd4_op_u *u)
7552 {
7553 	get_stateid(cstate, &u->locku.lu_stateid);
7554 }
7555 
7556 void
7557 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7558 		union nfsd4_op_u *u)
7559 {
7560 	get_stateid(cstate, &u->read.rd_stateid);
7561 }
7562 
7563 void
7564 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7565 		union nfsd4_op_u *u)
7566 {
7567 	get_stateid(cstate, &u->write.wr_stateid);
7568 }
7569