1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include <linux/fsnotify.h>
47 #include <linux/nfs_ssc.h>
48 #include "xdr4.h"
49 #include "xdr4cb.h"
50 #include "vfs.h"
51 #include "current_stateid.h"
52 
53 #include "netns.h"
54 #include "pnfs.h"
55 #include "filecache.h"
56 #include "trace.h"
57 
58 #define NFSDDBG_FACILITY                NFSDDBG_PROC
59 
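/*
 * Special stateid values.  The all-zeros and all-ones stateids below are
 * special stateids defined by the NFSv4 protocol; currentstateid (seqid 1,
 * all-zero "other" field) matches the NFSv4.1 "current stateid" special
 * value, and close_stateid (generation 0xffffffff) is a server-local
 * sentinel.  The ZERO_STATEID(), ONE_STATEID(), CURRENT_STATEID() and
 * CLOSE_STATEID() macros below test for each of them.
 */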
60 #define all_ones {{~0,~0},~0}
61 static const stateid_t one_stateid = {
62 	.si_generation = ~0,
63 	.si_opaque = all_ones,
64 };
65 static const stateid_t zero_stateid = {
66 	/* all fields zero */
67 };
68 static const stateid_t currentstateid = {
69 	.si_generation = 1,
70 };
71 static const stateid_t close_stateid = {
72 	.si_generation = 0xffffffffU,
73 };
74 
75 static u64 current_sessionid = 1;
76 
77 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
78 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
79 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
80 #define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
81 
82 /* forward declarations */
83 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
84 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
85 void nfsd4_end_grace(struct nfsd_net *nn);
86 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
87 
88 /* Locking: */
89 
90 /*
91  * Currently used for the del_recall_lru and file hash table.  In an
92  * effort to decrease the scope of the client_mutex, this spinlock may
93  * eventually cover more:
94  */
95 static DEFINE_SPINLOCK(state_lock);
96 
97 enum nfsd4_st_mutex_lock_subclass {
98 	OPEN_STATEID_MUTEX = 0,
99 	LOCK_STATEID_MUTEX = 1,
100 };
101 
102 /*
103  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
104  * the refcount on the open stateid to drop.
105  */
106 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
107 
108 /*
109  * A waitqueue where a writer to clients/#/ctl destroying a client can
110  * wait for cl_rpc_users to drop to 0 and then for the client to be
111  * unhashed.
112  */
113 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
114 
115 static struct kmem_cache *client_slab;
116 static struct kmem_cache *openowner_slab;
117 static struct kmem_cache *lockowner_slab;
118 static struct kmem_cache *file_slab;
119 static struct kmem_cache *stateid_slab;
120 static struct kmem_cache *deleg_slab;
121 static struct kmem_cache *odstate_slab;
122 
123 static void free_session(struct nfsd4_session *);
124 
125 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
126 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
127 
128 static bool is_session_dead(struct nfsd4_session *ses)
129 {
130 	return ses->se_flags & NFS4_SESSION_DEAD;
131 }
132 
133 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
134 {
135 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
136 		return nfserr_jukebox;
137 	ses->se_flags |= NFS4_SESSION_DEAD;
138 	return nfs_ok;
139 }
140 
141 static bool is_client_expired(struct nfs4_client *clp)
142 {
143 	return clp->cl_time == 0;
144 }
145 
146 static __be32 get_client_locked(struct nfs4_client *clp)
147 {
148 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
149 
150 	lockdep_assert_held(&nn->client_lock);
151 
152 	if (is_client_expired(clp))
153 		return nfserr_expired;
154 	atomic_inc(&clp->cl_rpc_users);
155 	return nfs_ok;
156 }
157 
158 /* must be called under the client_lock */
159 static inline void
160 renew_client_locked(struct nfs4_client *clp)
161 {
162 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
163 
164 	if (is_client_expired(clp)) {
165 		WARN_ON(1);
166 		printk("%s: client (clientid %08x/%08x) already expired\n",
167 			__func__,
168 			clp->cl_clientid.cl_boot,
169 			clp->cl_clientid.cl_id);
170 		return;
171 	}
172 
173 	list_move_tail(&clp->cl_lru, &nn->client_lru);
174 	clp->cl_time = ktime_get_boottime_seconds();
175 }
176 
177 static void put_client_renew_locked(struct nfs4_client *clp)
178 {
179 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
180 
181 	lockdep_assert_held(&nn->client_lock);
182 
183 	if (!atomic_dec_and_test(&clp->cl_rpc_users))
184 		return;
185 	if (!is_client_expired(clp))
186 		renew_client_locked(clp);
187 	else
188 		wake_up_all(&expiry_wq);
189 }
190 
191 static void put_client_renew(struct nfs4_client *clp)
192 {
193 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
194 
195 	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
196 		return;
197 	if (!is_client_expired(clp))
198 		renew_client_locked(clp);
199 	else
200 		wake_up_all(&expiry_wq);
201 	spin_unlock(&nn->client_lock);
202 }
203 
204 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
205 {
206 	__be32 status;
207 
208 	if (is_session_dead(ses))
209 		return nfserr_badsession;
210 	status = get_client_locked(ses->se_client);
211 	if (status)
212 		return status;
213 	atomic_inc(&ses->se_ref);
214 	return nfs_ok;
215 }
216 
217 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
218 {
219 	struct nfs4_client *clp = ses->se_client;
220 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
221 
222 	lockdep_assert_held(&nn->client_lock);
223 
224 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
225 		free_session(ses);
226 	put_client_renew_locked(clp);
227 }
228 
229 static void nfsd4_put_session(struct nfsd4_session *ses)
230 {
231 	struct nfs4_client *clp = ses->se_client;
232 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
233 
234 	spin_lock(&nn->client_lock);
235 	nfsd4_put_session_locked(ses);
236 	spin_unlock(&nn->client_lock);
237 }
238 
239 static struct nfsd4_blocked_lock *
240 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
241 			struct nfsd_net *nn)
242 {
243 	struct nfsd4_blocked_lock *cur, *found = NULL;
244 
245 	spin_lock(&nn->blocked_locks_lock);
246 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
247 		if (fh_match(fh, &cur->nbl_fh)) {
248 			list_del_init(&cur->nbl_list);
249 			list_del_init(&cur->nbl_lru);
250 			found = cur;
251 			break;
252 		}
253 	}
254 	spin_unlock(&nn->blocked_locks_lock);
255 	if (found)
256 		locks_delete_block(&found->nbl_lock);
257 	return found;
258 }
259 
260 static struct nfsd4_blocked_lock *
261 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
262 			struct nfsd_net *nn)
263 {
264 	struct nfsd4_blocked_lock *nbl;
265 
266 	nbl = find_blocked_lock(lo, fh, nn);
267 	if (!nbl) {
268 			nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
269 		if (nbl) {
270 			INIT_LIST_HEAD(&nbl->nbl_list);
271 			INIT_LIST_HEAD(&nbl->nbl_lru);
272 			fh_copy_shallow(&nbl->nbl_fh, fh);
273 			locks_init_lock(&nbl->nbl_lock);
274 			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
275 					&nfsd4_cb_notify_lock_ops,
276 					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
277 		}
278 	}
279 	return nbl;
280 }
281 
282 static void
283 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
284 {
285 	locks_delete_block(&nbl->nbl_lock);
286 	locks_release_private(&nbl->nbl_lock);
287 	kfree(nbl);
288 }
289 
290 static void
291 remove_blocked_locks(struct nfs4_lockowner *lo)
292 {
293 	struct nfs4_client *clp = lo->lo_owner.so_client;
294 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
295 	struct nfsd4_blocked_lock *nbl;
296 	LIST_HEAD(reaplist);
297 
298 	/* Dequeue all blocked locks */
299 	spin_lock(&nn->blocked_locks_lock);
300 	while (!list_empty(&lo->lo_blocked)) {
301 		nbl = list_first_entry(&lo->lo_blocked,
302 					struct nfsd4_blocked_lock,
303 					nbl_list);
304 		list_del_init(&nbl->nbl_list);
305 		list_move(&nbl->nbl_lru, &reaplist);
306 	}
307 	spin_unlock(&nn->blocked_locks_lock);
308 
309 	/* Now free them */
310 	while (!list_empty(&reaplist)) {
311 		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
312 					nbl_lru);
313 		list_del_init(&nbl->nbl_lru);
314 		free_blocked_lock(nbl);
315 	}
316 }
317 
318 static void
319 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
320 {
321 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
322 						struct nfsd4_blocked_lock, nbl_cb);
323 	locks_delete_block(&nbl->nbl_lock);
324 }
325 
326 static int
327 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
328 {
329 	/*
330 	 * Since this is just an optimization, we don't try very hard if it
331 	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
332 	 * just quit trying on anything else.
333 	 */
334 	switch (task->tk_status) {
335 	case -NFS4ERR_DELAY:
336 		rpc_delay(task, 1 * HZ);
337 		return 0;
338 	default:
339 		return 1;
340 	}
341 }
342 
343 static void
344 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
345 {
346 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
347 						struct nfsd4_blocked_lock, nbl_cb);
348 
349 	free_blocked_lock(nbl);
350 }
351 
352 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
353 	.prepare	= nfsd4_cb_notify_lock_prepare,
354 	.done		= nfsd4_cb_notify_lock_done,
355 	.release	= nfsd4_cb_notify_lock_release,
356 };
357 
358 /*
359  * We store the NONE, READ, WRITE, and BOTH bits separately in the
360  * st_{access,deny}_bmap field of the stateid, in order to track not
361  * only what share bits are currently in force, but also what
362  * combinations of share bits previous opens have used.  This allows us
363  * to enforce the recommendation of rfc 3530 14.2.19 that the server
364  * return an error if the client attempts to downgrade to a combination
365  * of share bits not explicable by closing some of its previous opens.
366  *
367  * XXX: This enforcement is actually incomplete, since we don't keep
368  * track of access/deny bit combinations; so, e.g., we allow:
369  *
370  *	OPEN allow read, deny write
371  *	OPEN allow both, deny none
372  *	DOWNGRADE allow read, deny none
373  *
374  * which we should reject.
375  */
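/*
 * For illustration: NFS4_SHARE_ACCESS_READ, _WRITE and _BOTH have the
 * values 1, 2 and 3, and each OPEN sets the bit at that index in
 * st_access_bmap (see set_access() below).  An open for READ followed by
 * an open for BOTH therefore leaves bits 1 and 3 set, and
 * bmap_to_share_mode() ORs those indices back together to report an
 * effective share mode of 3 (BOTH).
 */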
376 static unsigned int
377 bmap_to_share_mode(unsigned long bmap)
378 {
379 	int i;
380 	unsigned int access = 0;
381 
382 	for (i = 1; i < 4; i++) {
383 		if (test_bit(i, &bmap))
384 			access |= i;
385 	}
386 	return access;
387 }
388 
389 /* set share access for a given stateid */
390 static inline void
391 set_access(u32 access, struct nfs4_ol_stateid *stp)
392 {
393 	unsigned char mask = 1 << access;
394 
395 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
396 	stp->st_access_bmap |= mask;
397 }
398 
399 /* clear share access for a given stateid */
400 static inline void
401 clear_access(u32 access, struct nfs4_ol_stateid *stp)
402 {
403 	unsigned char mask = 1 << access;
404 
405 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
406 	stp->st_access_bmap &= ~mask;
407 }
408 
409 /* test whether a given stateid has access */
410 static inline bool
411 test_access(u32 access, struct nfs4_ol_stateid *stp)
412 {
413 	unsigned char mask = 1 << access;
414 
415 	return (bool)(stp->st_access_bmap & mask);
416 }
417 
418 /* set share deny for a given stateid */
419 static inline void
420 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
421 {
422 	unsigned char mask = 1 << deny;
423 
424 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
425 	stp->st_deny_bmap |= mask;
426 }
427 
428 /* clear share deny for a given stateid */
429 static inline void
430 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
431 {
432 	unsigned char mask = 1 << deny;
433 
434 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
435 	stp->st_deny_bmap &= ~mask;
436 }
437 
438 /* test whether a given stateid is denying specific access */
439 static inline bool
440 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
441 {
442 	unsigned char mask = 1 << deny;
443 
444 	return (bool)(stp->st_deny_bmap & mask);
445 }
446 
447 static int nfs4_access_to_omode(u32 access)
448 {
449 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
450 	case NFS4_SHARE_ACCESS_READ:
451 		return O_RDONLY;
452 	case NFS4_SHARE_ACCESS_WRITE:
453 		return O_WRONLY;
454 	case NFS4_SHARE_ACCESS_BOTH:
455 		return O_RDWR;
456 	}
457 	WARN_ON_ONCE(1);
458 	return O_RDONLY;
459 }
460 
461 static inline int
462 access_permit_read(struct nfs4_ol_stateid *stp)
463 {
464 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
465 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
466 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
467 }
468 
469 static inline int
470 access_permit_write(struct nfs4_ol_stateid *stp)
471 {
472 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
473 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
474 }
475 
476 static inline struct nfs4_stateowner *
477 nfs4_get_stateowner(struct nfs4_stateowner *sop)
478 {
479 	atomic_inc(&sop->so_count);
480 	return sop;
481 }
482 
483 static int
484 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
485 {
486 	return (sop->so_owner.len == owner->len) &&
487 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
488 }
489 
490 static struct nfs4_openowner *
491 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
492 			struct nfs4_client *clp)
493 {
494 	struct nfs4_stateowner *so;
495 
496 	lockdep_assert_held(&clp->cl_lock);
497 
498 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
499 			    so_strhash) {
500 		if (!so->so_is_open_owner)
501 			continue;
502 		if (same_owner_str(so, &open->op_owner))
503 			return openowner(nfs4_get_stateowner(so));
504 	}
505 	return NULL;
506 }
507 
508 static struct nfs4_openowner *
509 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
510 			struct nfs4_client *clp)
511 {
512 	struct nfs4_openowner *oo;
513 
514 	spin_lock(&clp->cl_lock);
515 	oo = find_openstateowner_str_locked(hashval, open, clp);
516 	spin_unlock(&clp->cl_lock);
517 	return oo;
518 }
519 
520 static inline u32
521 opaque_hashval(const void *ptr, int nbytes)
522 {
523 	unsigned char *cptr = (unsigned char *) ptr;
524 
525 	u32 x = 0;
526 	while (nbytes--) {
527 		x *= 37;
528 		x += *cptr++;
529 	}
530 	return x;
531 }
532 
533 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
534 {
535 	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
536 
537 	kmem_cache_free(file_slab, fp);
538 }
539 
540 void
541 put_nfs4_file(struct nfs4_file *fi)
542 {
543 	might_lock(&state_lock);
544 
545 	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
546 		hlist_del_rcu(&fi->fi_hash);
547 		spin_unlock(&state_lock);
548 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
549 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
550 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
551 	}
552 }
553 
554 static struct nfsd_file *
555 __nfs4_get_fd(struct nfs4_file *f, int oflag)
556 {
557 	if (f->fi_fds[oflag])
558 		return nfsd_file_get(f->fi_fds[oflag]);
559 	return NULL;
560 }
561 
562 static struct nfsd_file *
563 find_writeable_file_locked(struct nfs4_file *f)
564 {
565 	struct nfsd_file *ret;
566 
567 	lockdep_assert_held(&f->fi_lock);
568 
569 	ret = __nfs4_get_fd(f, O_WRONLY);
570 	if (!ret)
571 		ret = __nfs4_get_fd(f, O_RDWR);
572 	return ret;
573 }
574 
575 static struct nfsd_file *
576 find_writeable_file(struct nfs4_file *f)
577 {
578 	struct nfsd_file *ret;
579 
580 	spin_lock(&f->fi_lock);
581 	ret = find_writeable_file_locked(f);
582 	spin_unlock(&f->fi_lock);
583 
584 	return ret;
585 }
586 
587 static struct nfsd_file *
588 find_readable_file_locked(struct nfs4_file *f)
589 {
590 	struct nfsd_file *ret;
591 
592 	lockdep_assert_held(&f->fi_lock);
593 
594 	ret = __nfs4_get_fd(f, O_RDONLY);
595 	if (!ret)
596 		ret = __nfs4_get_fd(f, O_RDWR);
597 	return ret;
598 }
599 
600 static struct nfsd_file *
601 find_readable_file(struct nfs4_file *f)
602 {
603 	struct nfsd_file *ret;
604 
605 	spin_lock(&f->fi_lock);
606 	ret = find_readable_file_locked(f);
607 	spin_unlock(&f->fi_lock);
608 
609 	return ret;
610 }
611 
612 struct nfsd_file *
613 find_any_file(struct nfs4_file *f)
614 {
615 	struct nfsd_file *ret;
616 
617 	if (!f)
618 		return NULL;
619 	spin_lock(&f->fi_lock);
620 	ret = __nfs4_get_fd(f, O_RDWR);
621 	if (!ret) {
622 		ret = __nfs4_get_fd(f, O_WRONLY);
623 		if (!ret)
624 			ret = __nfs4_get_fd(f, O_RDONLY);
625 	}
626 	spin_unlock(&f->fi_lock);
627 	return ret;
628 }
629 
630 static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
631 {
632 	struct nfsd_file *ret = NULL;
633 
634 	spin_lock(&f->fi_lock);
635 	if (f->fi_deleg_file)
636 		ret = nfsd_file_get(f->fi_deleg_file);
637 	spin_unlock(&f->fi_lock);
638 	return ret;
639 }
640 
641 static atomic_long_t num_delegations;
642 unsigned long max_delegations;
643 
644 /*
645  * Open owner state (share locks)
646  */
647 
648 /* hash tables for lock and open owners */
649 #define OWNER_HASH_BITS              8
650 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
651 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
652 
653 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
654 {
655 	unsigned int ret;
656 
657 	ret = opaque_hashval(ownername->data, ownername->len);
658 	return ret & OWNER_HASH_MASK;
659 }
660 
661 /* hash table for nfs4_file */
662 #define FILE_HASH_BITS                   8
663 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
664 
665 static unsigned int file_hashval(struct svc_fh *fh)
666 {
667 	struct inode *inode = d_inode(fh->fh_dentry);
668 
669 	/* XXX: why not (here & in file cache) use inode? */
670 	return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
671 }
672 
673 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
674 
675 static void
676 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
677 {
678 	lockdep_assert_held(&fp->fi_lock);
679 
680 	if (access & NFS4_SHARE_ACCESS_WRITE)
681 		atomic_inc(&fp->fi_access[O_WRONLY]);
682 	if (access & NFS4_SHARE_ACCESS_READ)
683 		atomic_inc(&fp->fi_access[O_RDONLY]);
684 }
685 
686 static __be32
687 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
688 {
689 	lockdep_assert_held(&fp->fi_lock);
690 
691 	/* Does this access mode make sense? */
692 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
693 		return nfserr_inval;
694 
695 	/* Does it conflict with a deny mode already set? */
696 	if ((access & fp->fi_share_deny) != 0)
697 		return nfserr_share_denied;
698 
699 	__nfs4_file_get_access(fp, access);
700 	return nfs_ok;
701 }
702 
703 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
704 {
705 	/* Common case is that there is no deny mode. */
706 	if (deny) {
707 		/* Does this deny mode make sense? */
708 		if (deny & ~NFS4_SHARE_DENY_BOTH)
709 			return nfserr_inval;
710 
711 		if ((deny & NFS4_SHARE_DENY_READ) &&
712 		    atomic_read(&fp->fi_access[O_RDONLY]))
713 			return nfserr_share_denied;
714 
715 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
716 		    atomic_read(&fp->fi_access[O_WRONLY]))
717 			return nfserr_share_denied;
718 	}
719 	return nfs_ok;
720 }
721 
722 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
723 {
724 	might_lock(&fp->fi_lock);
725 
726 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
727 		struct nfsd_file *f1 = NULL;
728 		struct nfsd_file *f2 = NULL;
729 
730 		swap(f1, fp->fi_fds[oflag]);
731 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
732 			swap(f2, fp->fi_fds[O_RDWR]);
733 		spin_unlock(&fp->fi_lock);
734 		if (f1)
735 			nfsd_file_put(f1);
736 		if (f2)
737 			nfsd_file_put(f2);
738 	}
739 }
740 
741 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
742 {
743 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
744 
745 	if (access & NFS4_SHARE_ACCESS_WRITE)
746 		__nfs4_file_put_access(fp, O_WRONLY);
747 	if (access & NFS4_SHARE_ACCESS_READ)
748 		__nfs4_file_put_access(fp, O_RDONLY);
749 }
750 
751 /*
752  * Allocate a new open/delegation state counter. This is needed for
753  * pNFS for proper return on close semantics.
754  *
755  * Note that we only allocate it for pNFS-enabled exports, otherwise
756  * all pointers to struct nfs4_clnt_odstate are always NULL.
757  */
758 static struct nfs4_clnt_odstate *
759 alloc_clnt_odstate(struct nfs4_client *clp)
760 {
761 	struct nfs4_clnt_odstate *co;
762 
763 	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
764 	if (co) {
765 		co->co_client = clp;
766 		refcount_set(&co->co_odcount, 1);
767 	}
768 	return co;
769 }
770 
771 static void
772 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
773 {
774 	struct nfs4_file *fp = co->co_file;
775 
776 	lockdep_assert_held(&fp->fi_lock);
777 	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
778 }
779 
780 static inline void
781 get_clnt_odstate(struct nfs4_clnt_odstate *co)
782 {
783 	if (co)
784 		refcount_inc(&co->co_odcount);
785 }
786 
787 static void
788 put_clnt_odstate(struct nfs4_clnt_odstate *co)
789 {
790 	struct nfs4_file *fp;
791 
792 	if (!co)
793 		return;
794 
795 	fp = co->co_file;
796 	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
797 		list_del(&co->co_perfile);
798 		spin_unlock(&fp->fi_lock);
799 
800 		nfsd4_return_all_file_layouts(co->co_client, fp);
801 		kmem_cache_free(odstate_slab, co);
802 	}
803 }
804 
805 static struct nfs4_clnt_odstate *
806 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
807 {
808 	struct nfs4_clnt_odstate *co;
809 	struct nfs4_client *cl;
810 
811 	if (!new)
812 		return NULL;
813 
814 	cl = new->co_client;
815 
816 	spin_lock(&fp->fi_lock);
817 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
818 		if (co->co_client == cl) {
819 			get_clnt_odstate(co);
820 			goto out;
821 		}
822 	}
823 	co = new;
824 	co->co_file = fp;
825 	hash_clnt_odstate_locked(new);
826 out:
827 	spin_unlock(&fp->fi_lock);
828 	return co;
829 }
830 
831 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
832 				  void (*sc_free)(struct nfs4_stid *))
833 {
834 	struct nfs4_stid *stid;
835 	int new_id;
836 
837 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
838 	if (!stid)
839 		return NULL;
840 
841 	idr_preload(GFP_KERNEL);
842 	spin_lock(&cl->cl_lock);
843 	/* Reserving 0 for start of file in nfsdfs "states" file: */
844 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
845 	spin_unlock(&cl->cl_lock);
846 	idr_preload_end();
847 	if (new_id < 0)
848 		goto out_free;
849 
850 	stid->sc_free = sc_free;
851 	stid->sc_client = cl;
852 	stid->sc_stateid.si_opaque.so_id = new_id;
853 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
854 	/* Will be incremented before return to client: */
855 	refcount_set(&stid->sc_count, 1);
856 	spin_lock_init(&stid->sc_lock);
857 	INIT_LIST_HEAD(&stid->sc_cp_list);
858 
859 	/*
860 	 * It shouldn't be a problem to reuse an opaque stateid value.
861 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
862 	 * example, a stray write retransmission could be accepted by
863 	 * the server when it should have been rejected.  Therefore,
864 	 * adopt a trick from the sctp code to attempt to maximize the
865 	 * amount of time until an id is reused, by ensuring they always
866 	 * "increase" (mod INT_MAX):
867 	 */
868 	return stid;
869 out_free:
870 	kmem_cache_free(slab, stid);
871 	return NULL;
872 }
873 
874 /*
875  * Create a unique stateid_t to represent each COPY.
876  */
877 static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
878 			      unsigned char sc_type)
879 {
880 	int new_id;
881 
882 	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
883 	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
884 	stid->sc_type = sc_type;
885 
886 	idr_preload(GFP_KERNEL);
887 	spin_lock(&nn->s2s_cp_lock);
888 	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
889 	stid->stid.si_opaque.so_id = new_id;
890 	stid->stid.si_generation = 1;
891 	spin_unlock(&nn->s2s_cp_lock);
892 	idr_preload_end();
893 	if (new_id < 0)
894 		return 0;
895 	return 1;
896 }
897 
898 int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
899 {
900 	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
901 }
902 
903 struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
904 						     struct nfs4_stid *p_stid)
905 {
906 	struct nfs4_cpntf_state *cps;
907 
908 	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
909 	if (!cps)
910 		return NULL;
911 	cps->cpntf_time = ktime_get_boottime_seconds();
912 	refcount_set(&cps->cp_stateid.sc_count, 1);
913 	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
914 		goto out_free;
915 	spin_lock(&nn->s2s_cp_lock);
916 	list_add(&cps->cp_list, &p_stid->sc_cp_list);
917 	spin_unlock(&nn->s2s_cp_lock);
918 	return cps;
919 out_free:
920 	kfree(cps);
921 	return NULL;
922 }
923 
924 void nfs4_free_copy_state(struct nfsd4_copy *copy)
925 {
926 	struct nfsd_net *nn;
927 
928 	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
929 	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
930 	spin_lock(&nn->s2s_cp_lock);
931 	idr_remove(&nn->s2s_cp_stateids,
932 		   copy->cp_stateid.stid.si_opaque.so_id);
933 	spin_unlock(&nn->s2s_cp_lock);
934 }
935 
936 static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
937 {
938 	struct nfs4_cpntf_state *cps;
939 	struct nfsd_net *nn;
940 
941 	nn = net_generic(net, nfsd_net_id);
942 	spin_lock(&nn->s2s_cp_lock);
943 	while (!list_empty(&stid->sc_cp_list)) {
944 		cps = list_first_entry(&stid->sc_cp_list,
945 				       struct nfs4_cpntf_state, cp_list);
946 		_free_cpntf_state_locked(nn, cps);
947 	}
948 	spin_unlock(&nn->s2s_cp_lock);
949 }
950 
951 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
952 {
953 	struct nfs4_stid *stid;
954 
955 	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
956 	if (!stid)
957 		return NULL;
958 
959 	return openlockstateid(stid);
960 }
961 
962 static void nfs4_free_deleg(struct nfs4_stid *stid)
963 {
964 	kmem_cache_free(deleg_slab, stid);
965 	atomic_long_dec(&num_delegations);
966 }
967 
968 /*
969  * When we recall a delegation, we should be careful not to hand it
970  * out again straight away.
971  * To ensure this we keep a pair of bloom filters ('new' and 'old')
972  * in which the filehandles of recalled delegations are "stored".
973  * If a filehandle appears in either filter, a delegation is blocked.
974  * When a delegation is recalled, the filehandle is stored in the "new"
975  * filter.
976  * Every 30 seconds we swap the filters and clear the "new" one,
977  * unless both are empty of course.
978  *
979  * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
980  * low 3 bytes as hash-table indices.
981  *
982  * 'blocked_delegations_lock', which is always taken in block_delegations(),
983  * is used to manage concurrent access.  Testing does not need the lock
984  * except when swapping the two filters.
985  */
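/*
 * Worked example of the scheme described above: jhash() reduces the
 * filehandle to a 32-bit value; bits 0-7, 8-15 and 16-23 each select one
 * position in a 256-bit filter.  delegation_blocked() reports a match only
 * when all three bits are set in the same filter ("new" or "old"), so a
 * false positive merely withholds a delegation, while a recalled
 * filehandle stays reliably blocked until its filter is cleared on a
 * later swap.
 */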
986 static DEFINE_SPINLOCK(blocked_delegations_lock);
987 static struct bloom_pair {
988 	int	entries, old_entries;
989 	time64_t swap_time;
990 	int	new; /* index into 'set' */
991 	DECLARE_BITMAP(set[2], 256);
992 } blocked_delegations;
993 
994 static int delegation_blocked(struct knfsd_fh *fh)
995 {
996 	u32 hash;
997 	struct bloom_pair *bd = &blocked_delegations;
998 
999 	if (bd->entries == 0)
1000 		return 0;
1001 	if (ktime_get_seconds() - bd->swap_time > 30) {
1002 		spin_lock(&blocked_delegations_lock);
1003 		if (ktime_get_seconds() - bd->swap_time > 30) {
1004 			bd->entries -= bd->old_entries;
1005 			bd->old_entries = bd->entries;
1006 			memset(bd->set[bd->new], 0,
1007 			       sizeof(bd->set[0]));
1008 			bd->new = 1-bd->new;
1009 			bd->swap_time = ktime_get_seconds();
1010 		}
1011 		spin_unlock(&blocked_delegations_lock);
1012 	}
1013 	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1014 	if (test_bit(hash&255, bd->set[0]) &&
1015 	    test_bit((hash>>8)&255, bd->set[0]) &&
1016 	    test_bit((hash>>16)&255, bd->set[0]))
1017 		return 1;
1018 
1019 	if (test_bit(hash&255, bd->set[1]) &&
1020 	    test_bit((hash>>8)&255, bd->set[1]) &&
1021 	    test_bit((hash>>16)&255, bd->set[1]))
1022 		return 1;
1023 
1024 	return 0;
1025 }
1026 
1027 static void block_delegations(struct knfsd_fh *fh)
1028 {
1029 	u32 hash;
1030 	struct bloom_pair *bd = &blocked_delegations;
1031 
1032 	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1033 
1034 	spin_lock(&blocked_delegations_lock);
1035 	__set_bit(hash&255, bd->set[bd->new]);
1036 	__set_bit((hash>>8)&255, bd->set[bd->new]);
1037 	__set_bit((hash>>16)&255, bd->set[bd->new]);
1038 	if (bd->entries == 0)
1039 		bd->swap_time = ktime_get_seconds();
1040 	bd->entries += 1;
1041 	spin_unlock(&blocked_delegations_lock);
1042 }
1043 
1044 static struct nfs4_delegation *
1045 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1046 		 struct svc_fh *current_fh,
1047 		 struct nfs4_clnt_odstate *odstate)
1048 {
1049 	struct nfs4_delegation *dp;
1050 	long n;
1051 
1052 	dprintk("NFSD alloc_init_deleg\n");
1053 	n = atomic_long_inc_return(&num_delegations);
1054 	if (n < 0 || n > max_delegations)
1055 		goto out_dec;
1056 	if (delegation_blocked(&current_fh->fh_handle))
1057 		goto out_dec;
1058 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
1059 	if (dp == NULL)
1060 		goto out_dec;
1061 
1062 	/*
1063 	 * delegation seqids are never incremented.  The 4.1 special
1064 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1065 	 * 0 anyway just for consistency and use 1:
1066 	 */
1067 	dp->dl_stid.sc_stateid.si_generation = 1;
1068 	INIT_LIST_HEAD(&dp->dl_perfile);
1069 	INIT_LIST_HEAD(&dp->dl_perclnt);
1070 	INIT_LIST_HEAD(&dp->dl_recall_lru);
1071 	dp->dl_clnt_odstate = odstate;
1072 	get_clnt_odstate(odstate);
1073 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
1074 	dp->dl_retries = 1;
1075 	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1076 		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1077 	get_nfs4_file(fp);
1078 	dp->dl_stid.sc_file = fp;
1079 	return dp;
1080 out_dec:
1081 	atomic_long_dec(&num_delegations);
1082 	return NULL;
1083 }
1084 
1085 void
1086 nfs4_put_stid(struct nfs4_stid *s)
1087 {
1088 	struct nfs4_file *fp = s->sc_file;
1089 	struct nfs4_client *clp = s->sc_client;
1090 
1091 	might_lock(&clp->cl_lock);
1092 
1093 	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1094 		wake_up_all(&close_wq);
1095 		return;
1096 	}
1097 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1098 	nfs4_free_cpntf_statelist(clp->net, s);
1099 	spin_unlock(&clp->cl_lock);
1100 	s->sc_free(s);
1101 	if (fp)
1102 		put_nfs4_file(fp);
1103 }
1104 
1105 void
1106 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1107 {
1108 	stateid_t *src = &stid->sc_stateid;
1109 
1110 	spin_lock(&stid->sc_lock);
1111 	if (unlikely(++src->si_generation == 0))
1112 		src->si_generation = 1;
1113 	memcpy(dst, src, sizeof(*dst));
1114 	spin_unlock(&stid->sc_lock);
1115 }
1116 
1117 static void put_deleg_file(struct nfs4_file *fp)
1118 {
1119 	struct nfsd_file *nf = NULL;
1120 
1121 	spin_lock(&fp->fi_lock);
1122 	if (--fp->fi_delegees == 0)
1123 		swap(nf, fp->fi_deleg_file);
1124 	spin_unlock(&fp->fi_lock);
1125 
1126 	if (nf)
1127 		nfsd_file_put(nf);
1128 }
1129 
1130 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1131 {
1132 	struct nfs4_file *fp = dp->dl_stid.sc_file;
1133 	struct nfsd_file *nf = fp->fi_deleg_file;
1134 
1135 	WARN_ON_ONCE(!fp->fi_delegees);
1136 
1137 	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1138 	put_deleg_file(fp);
1139 }
1140 
1141 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1142 {
1143 	put_clnt_odstate(dp->dl_clnt_odstate);
1144 	nfs4_unlock_deleg_lease(dp);
1145 	nfs4_put_stid(&dp->dl_stid);
1146 }
1147 
1148 void nfs4_unhash_stid(struct nfs4_stid *s)
1149 {
1150 	s->sc_type = 0;
1151 }
1152 
1153 /**
1154  * nfs4_delegation_exists - Discover if this delegation already exists
1155  * @clp:     a pointer to the nfs4_client we're granting a delegation to
1156  * @fp:      a pointer to the nfs4_file we're granting a delegation on
1157  *
1158  * Return:
1159  *      On success: true iff an existing delegation is found
1160  */
1161 
1162 static bool
1163 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1164 {
1165 	struct nfs4_delegation *searchdp = NULL;
1166 	struct nfs4_client *searchclp = NULL;
1167 
1168 	lockdep_assert_held(&state_lock);
1169 	lockdep_assert_held(&fp->fi_lock);
1170 
1171 	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1172 		searchclp = searchdp->dl_stid.sc_client;
1173 		if (clp == searchclp) {
1174 			return true;
1175 		}
1176 	}
1177 	return false;
1178 }
1179 
1180 /**
1181  * hash_delegation_locked - Add a delegation to the appropriate lists
1182  * @dp:     a pointer to the nfs4_delegation we are adding.
1183  * @fp:     a pointer to the nfs4_file we're granting a delegation on
1184  *
1185  * Return:
1186  *      On success: 0 if the delegation was successfully hashed.
1187  *
1188  *      On error: -EAGAIN if one was previously granted to this
1189  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
1190  *
1191  */
1192 
1193 static int
1194 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1195 {
1196 	struct nfs4_client *clp = dp->dl_stid.sc_client;
1197 
1198 	lockdep_assert_held(&state_lock);
1199 	lockdep_assert_held(&fp->fi_lock);
1200 
1201 	if (nfs4_delegation_exists(clp, fp))
1202 		return -EAGAIN;
1203 	refcount_inc(&dp->dl_stid.sc_count);
1204 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
1205 	list_add(&dp->dl_perfile, &fp->fi_delegations);
1206 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
1207 	return 0;
1208 }
1209 
1210 static bool delegation_hashed(struct nfs4_delegation *dp)
1211 {
1212 	return !(list_empty(&dp->dl_perfile));
1213 }
1214 
1215 static bool
1216 unhash_delegation_locked(struct nfs4_delegation *dp)
1217 {
1218 	struct nfs4_file *fp = dp->dl_stid.sc_file;
1219 
1220 	lockdep_assert_held(&state_lock);
1221 
1222 	if (!delegation_hashed(dp))
1223 		return false;
1224 
1225 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1226 	/* Ensure that deleg break won't try to requeue it */
1227 	++dp->dl_time;
1228 	spin_lock(&fp->fi_lock);
1229 	list_del_init(&dp->dl_perclnt);
1230 	list_del_init(&dp->dl_recall_lru);
1231 	list_del_init(&dp->dl_perfile);
1232 	spin_unlock(&fp->fi_lock);
1233 	return true;
1234 }
1235 
1236 static void destroy_delegation(struct nfs4_delegation *dp)
1237 {
1238 	bool unhashed;
1239 
1240 	spin_lock(&state_lock);
1241 	unhashed = unhash_delegation_locked(dp);
1242 	spin_unlock(&state_lock);
1243 	if (unhashed)
1244 		destroy_unhashed_deleg(dp);
1245 }
1246 
1247 static void revoke_delegation(struct nfs4_delegation *dp)
1248 {
1249 	struct nfs4_client *clp = dp->dl_stid.sc_client;
1250 
1251 	WARN_ON(!list_empty(&dp->dl_recall_lru));
1252 
1253 	if (clp->cl_minorversion) {
1254 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1255 		refcount_inc(&dp->dl_stid.sc_count);
1256 		spin_lock(&clp->cl_lock);
1257 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1258 		spin_unlock(&clp->cl_lock);
1259 	}
1260 	destroy_unhashed_deleg(dp);
1261 }
1262 
1263 /*
1264  * SETCLIENTID state
1265  */
1266 
1267 static unsigned int clientid_hashval(u32 id)
1268 {
1269 	return id & CLIENT_HASH_MASK;
1270 }
1271 
1272 static unsigned int clientstr_hashval(struct xdr_netobj name)
1273 {
1274 	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1275 }
1276 
1277 /*
1278  * A stateid that had a deny mode associated with it is being released
1279  * or downgraded. Recalculate the deny mode on the file.
1280  */
1281 static void
1282 recalculate_deny_mode(struct nfs4_file *fp)
1283 {
1284 	struct nfs4_ol_stateid *stp;
1285 
1286 	spin_lock(&fp->fi_lock);
1287 	fp->fi_share_deny = 0;
1288 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1289 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1290 	spin_unlock(&fp->fi_lock);
1291 }
1292 
1293 static void
1294 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1295 {
1296 	int i;
1297 	bool change = false;
1298 
1299 	for (i = 1; i < 4; i++) {
1300 		if ((i & deny) != i) {
1301 			change = true;
1302 			clear_deny(i, stp);
1303 		}
1304 	}
1305 
1306 	/* Recalculate per-file deny mode if there was a change */
1307 	if (change)
1308 		recalculate_deny_mode(stp->st_stid.sc_file);
1309 }
1310 
1311 /* release all access and file references for a given stateid */
1312 static void
1313 release_all_access(struct nfs4_ol_stateid *stp)
1314 {
1315 	int i;
1316 	struct nfs4_file *fp = stp->st_stid.sc_file;
1317 
1318 	if (fp && stp->st_deny_bmap != 0)
1319 		recalculate_deny_mode(fp);
1320 
1321 	for (i = 1; i < 4; i++) {
1322 		if (test_access(i, stp))
1323 			nfs4_file_put_access(stp->st_stid.sc_file, i);
1324 		clear_access(i, stp);
1325 	}
1326 }
1327 
1328 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1329 {
1330 	kfree(sop->so_owner.data);
1331 	sop->so_ops->so_free(sop);
1332 }
1333 
1334 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1335 {
1336 	struct nfs4_client *clp = sop->so_client;
1337 
1338 	might_lock(&clp->cl_lock);
1339 
1340 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1341 		return;
1342 	sop->so_ops->so_unhash(sop);
1343 	spin_unlock(&clp->cl_lock);
1344 	nfs4_free_stateowner(sop);
1345 }
1346 
1347 static bool
1348 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1349 {
1350 	return list_empty(&stp->st_perfile);
1351 }
1352 
1353 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1354 {
1355 	struct nfs4_file *fp = stp->st_stid.sc_file;
1356 
1357 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1358 
1359 	if (list_empty(&stp->st_perfile))
1360 		return false;
1361 
1362 	spin_lock(&fp->fi_lock);
1363 	list_del_init(&stp->st_perfile);
1364 	spin_unlock(&fp->fi_lock);
1365 	list_del(&stp->st_perstateowner);
1366 	return true;
1367 }
1368 
1369 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1370 {
1371 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1372 
1373 	put_clnt_odstate(stp->st_clnt_odstate);
1374 	release_all_access(stp);
1375 	if (stp->st_stateowner)
1376 		nfs4_put_stateowner(stp->st_stateowner);
1377 	kmem_cache_free(stateid_slab, stid);
1378 }
1379 
1380 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1381 {
1382 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1383 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1384 	struct nfsd_file *nf;
1385 
1386 	nf = find_any_file(stp->st_stid.sc_file);
1387 	if (nf) {
1388 		get_file(nf->nf_file);
1389 		filp_close(nf->nf_file, (fl_owner_t)lo);
1390 		nfsd_file_put(nf);
1391 	}
1392 	nfs4_free_ol_stateid(stid);
1393 }
1394 
1395 /*
1396  * Put the persistent reference to an already unhashed generic stateid, while
1397  * holding the cl_lock. If it's the last reference, then put it onto the
1398  * reaplist for later destruction.
1399  */
1400 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1401 				       struct list_head *reaplist)
1402 {
1403 	struct nfs4_stid *s = &stp->st_stid;
1404 	struct nfs4_client *clp = s->sc_client;
1405 
1406 	lockdep_assert_held(&clp->cl_lock);
1407 
1408 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1409 
1410 	if (!refcount_dec_and_test(&s->sc_count)) {
1411 		wake_up_all(&close_wq);
1412 		return;
1413 	}
1414 
1415 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1416 	list_add(&stp->st_locks, reaplist);
1417 }
1418 
1419 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1420 {
1421 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1422 
1423 	if (!unhash_ol_stateid(stp))
1424 		return false;
1425 	list_del_init(&stp->st_locks);
1426 	nfs4_unhash_stid(&stp->st_stid);
1427 	return true;
1428 }
1429 
1430 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1431 {
1432 	struct nfs4_client *clp = stp->st_stid.sc_client;
1433 	bool unhashed;
1434 
1435 	spin_lock(&clp->cl_lock);
1436 	unhashed = unhash_lock_stateid(stp);
1437 	spin_unlock(&clp->cl_lock);
1438 	if (unhashed)
1439 		nfs4_put_stid(&stp->st_stid);
1440 }
1441 
1442 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1443 {
1444 	struct nfs4_client *clp = lo->lo_owner.so_client;
1445 
1446 	lockdep_assert_held(&clp->cl_lock);
1447 
1448 	list_del_init(&lo->lo_owner.so_strhash);
1449 }
1450 
1451 /*
1452  * Free a list of generic stateids that were collected earlier after being
1453  * fully unhashed.
1454  */
1455 static void
1456 free_ol_stateid_reaplist(struct list_head *reaplist)
1457 {
1458 	struct nfs4_ol_stateid *stp;
1459 	struct nfs4_file *fp;
1460 
1461 	might_sleep();
1462 
1463 	while (!list_empty(reaplist)) {
1464 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1465 				       st_locks);
1466 		list_del(&stp->st_locks);
1467 		fp = stp->st_stid.sc_file;
1468 		stp->st_stid.sc_free(&stp->st_stid);
1469 		if (fp)
1470 			put_nfs4_file(fp);
1471 	}
1472 }
1473 
1474 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1475 				       struct list_head *reaplist)
1476 {
1477 	struct nfs4_ol_stateid *stp;
1478 
1479 	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1480 
1481 	while (!list_empty(&open_stp->st_locks)) {
1482 		stp = list_entry(open_stp->st_locks.next,
1483 				struct nfs4_ol_stateid, st_locks);
1484 		WARN_ON(!unhash_lock_stateid(stp));
1485 		put_ol_stateid_locked(stp, reaplist);
1486 	}
1487 }
1488 
1489 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1490 				struct list_head *reaplist)
1491 {
1492 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1493 
1494 	if (!unhash_ol_stateid(stp))
1495 		return false;
1496 	release_open_stateid_locks(stp, reaplist);
1497 	return true;
1498 }
1499 
1500 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1501 {
1502 	LIST_HEAD(reaplist);
1503 
1504 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1505 	if (unhash_open_stateid(stp, &reaplist))
1506 		put_ol_stateid_locked(stp, &reaplist);
1507 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1508 	free_ol_stateid_reaplist(&reaplist);
1509 }
1510 
1511 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1512 {
1513 	struct nfs4_client *clp = oo->oo_owner.so_client;
1514 
1515 	lockdep_assert_held(&clp->cl_lock);
1516 
1517 	list_del_init(&oo->oo_owner.so_strhash);
1518 	list_del_init(&oo->oo_perclient);
1519 }
1520 
1521 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1522 {
1523 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1524 					  nfsd_net_id);
1525 	struct nfs4_ol_stateid *s;
1526 
1527 	spin_lock(&nn->client_lock);
1528 	s = oo->oo_last_closed_stid;
1529 	if (s) {
1530 		list_del_init(&oo->oo_close_lru);
1531 		oo->oo_last_closed_stid = NULL;
1532 	}
1533 	spin_unlock(&nn->client_lock);
1534 	if (s)
1535 		nfs4_put_stid(&s->st_stid);
1536 }
1537 
1538 static void release_openowner(struct nfs4_openowner *oo)
1539 {
1540 	struct nfs4_ol_stateid *stp;
1541 	struct nfs4_client *clp = oo->oo_owner.so_client;
1542 	struct list_head reaplist;
1543 
1544 	INIT_LIST_HEAD(&reaplist);
1545 
1546 	spin_lock(&clp->cl_lock);
1547 	unhash_openowner_locked(oo);
1548 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1549 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1550 				struct nfs4_ol_stateid, st_perstateowner);
1551 		if (unhash_open_stateid(stp, &reaplist))
1552 			put_ol_stateid_locked(stp, &reaplist);
1553 	}
1554 	spin_unlock(&clp->cl_lock);
1555 	free_ol_stateid_reaplist(&reaplist);
1556 	release_last_closed_stateid(oo);
1557 	nfs4_put_stateowner(&oo->oo_owner);
1558 }
1559 
1560 static inline int
1561 hash_sessionid(struct nfs4_sessionid *sessionid)
1562 {
1563 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1564 
1565 	return sid->sequence % SESSION_HASH_SIZE;
1566 }
1567 
1568 #ifdef CONFIG_SUNRPC_DEBUG
1569 static inline void
1570 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1571 {
1572 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1573 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1574 }
1575 #else
1576 static inline void
1577 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1578 {
1579 }
1580 #endif
1581 
1582 /*
1583  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1584  * won't be used for replay.
1585  */
1586 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1587 {
1588 	struct nfs4_stateowner *so = cstate->replay_owner;
1589 
1590 	if (nfserr == nfserr_replay_me)
1591 		return;
1592 
1593 	if (!seqid_mutating_err(ntohl(nfserr))) {
1594 		nfsd4_cstate_clear_replay(cstate);
1595 		return;
1596 	}
1597 	if (!so)
1598 		return;
1599 	if (so->so_is_open_owner)
1600 		release_last_closed_stateid(openowner(so));
1601 	so->so_seqid++;
1602 	return;
1603 }
1604 
1605 static void
1606 gen_sessionid(struct nfsd4_session *ses)
1607 {
1608 	struct nfs4_client *clp = ses->se_client;
1609 	struct nfsd4_sessionid *sid;
1610 
1611 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1612 	sid->clientid = clp->cl_clientid;
1613 	sid->sequence = current_sessionid++;
1614 	sid->reserved = 0;
1615 }
1616 
1617 /*
1618  * The protocol defines ca_maxresponsesize_cached to include the size of
1619  * the rpc header, but all we need to cache is the data starting after
1620  * the end of the initial SEQUENCE operation--the rest we regenerate
1621  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1622  * value that is the number of bytes in our cache plus a few additional
1623  * bytes.  In order to stay on the safe side, and not promise more than
1624  * we can cache, those additional bytes must be the minimum possible: 24
1625  * bytes of rpc header (xid through accept state, with AUTH_NULL
1626  * verifier), 12 for the compound header (with zero-length tag), and 44
1627  * for the SEQUENCE op response:
1628  */
1629 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1630 
1631 static void
1632 free_session_slots(struct nfsd4_session *ses)
1633 {
1634 	int i;
1635 
1636 	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1637 		free_svc_cred(&ses->se_slots[i]->sl_cred);
1638 		kfree(ses->se_slots[i]);
1639 	}
1640 }
1641 
1642 /*
1643  * We don't actually need to cache the rpc and session headers, so we
1644  * can allocate a little less for each slot:
1645  */
1646 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1647 {
1648 	u32 size;
1649 
1650 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1651 		size = 0;
1652 	else
1653 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1654 	return size + sizeof(struct nfsd4_slot);
1655 }
1656 
1657 /*
1658  * XXX: If we run out of reserved DRC memory we could (up to a point)
1659  * re-negotiate active sessions and reduce their slot usage to make
1660  * room for new connections. For now we just fail the create session.
1661  */
1662 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1663 {
1664 	u32 slotsize = slot_bytes(ca);
1665 	u32 num = ca->maxreqs;
1666 	unsigned long avail, total_avail;
1667 	unsigned int scale_factor;
1668 
1669 	spin_lock(&nfsd_drc_lock);
1670 	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1671 		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1672 	else
1673 		/* We have handed out more space than we chose in
1674 		 * set_max_drc() to allow.  That isn't really a
1675 		 * problem as long as that doesn't make us think we
1676 		 * have lots more due to integer overflow.
1677 		 */
1678 		total_avail = 0;
1679 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1680 	/*
1681 	 * Never use more than a fraction of the remaining memory,
1682 	 * unless it's the only way to give this client a slot.
1683 	 * The chosen fraction is either 1/8 or 1/number of threads,
1684 	 * whichever is smaller.  This ensures there are adequate
1685 	 * slots to support multiple clients per thread.
1686 	 * Give the client one slot even if that would require
1687 	 * over-allocation--it is better than failure.
1688 	 */
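	/*
	 * Illustrative numbers: with 32 nfsd threads, scale_factor below is
	 * 32, so a new session is normally limited to 1/32 of the remaining
	 * DRC memory, while the final max_t() still guarantees at least one
	 * slot even if that over-allocates.
	 */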
1689 	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1690 
1691 	avail = clamp_t(unsigned long, avail, slotsize,
1692 			total_avail/scale_factor);
1693 	num = min_t(int, num, avail / slotsize);
1694 	num = max_t(int, num, 1);
1695 	nfsd_drc_mem_used += num * slotsize;
1696 	spin_unlock(&nfsd_drc_lock);
1697 
1698 	return num;
1699 }
1700 
1701 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1702 {
1703 	int slotsize = slot_bytes(ca);
1704 
1705 	spin_lock(&nfsd_drc_lock);
1706 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1707 	spin_unlock(&nfsd_drc_lock);
1708 }
1709 
1710 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1711 					   struct nfsd4_channel_attrs *battrs)
1712 {
1713 	int numslots = fattrs->maxreqs;
1714 	int slotsize = slot_bytes(fattrs);
1715 	struct nfsd4_session *new;
1716 	int mem, i;
1717 
1718 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1719 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1720 	mem = numslots * sizeof(struct nfsd4_slot *);
1721 
1722 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1723 	if (!new)
1724 		return NULL;
1725 	/* allocate each struct nfsd4_slot and data cache in one piece */
1726 	for (i = 0; i < numslots; i++) {
1727 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1728 		if (!new->se_slots[i])
1729 			goto out_free;
1730 	}
1731 
1732 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1733 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1734 
1735 	return new;
1736 out_free:
1737 	while (i--)
1738 		kfree(new->se_slots[i]);
1739 	kfree(new);
1740 	return NULL;
1741 }
1742 
1743 static void free_conn(struct nfsd4_conn *c)
1744 {
1745 	svc_xprt_put(c->cn_xprt);
1746 	kfree(c);
1747 }
1748 
1749 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1750 {
1751 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1752 	struct nfs4_client *clp = c->cn_session->se_client;
1753 
1754 	trace_nfsd_cb_lost(clp);
1755 
1756 	spin_lock(&clp->cl_lock);
1757 	if (!list_empty(&c->cn_persession)) {
1758 		list_del(&c->cn_persession);
1759 		free_conn(c);
1760 	}
1761 	nfsd4_probe_callback(clp);
1762 	spin_unlock(&clp->cl_lock);
1763 }
1764 
1765 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1766 {
1767 	struct nfsd4_conn *conn;
1768 
1769 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1770 	if (!conn)
1771 		return NULL;
1772 	svc_xprt_get(rqstp->rq_xprt);
1773 	conn->cn_xprt = rqstp->rq_xprt;
1774 	conn->cn_flags = flags;
1775 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1776 	return conn;
1777 }
1778 
1779 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1780 {
1781 	conn->cn_session = ses;
1782 	list_add(&conn->cn_persession, &ses->se_conns);
1783 }
1784 
1785 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1786 {
1787 	struct nfs4_client *clp = ses->se_client;
1788 
1789 	spin_lock(&clp->cl_lock);
1790 	__nfsd4_hash_conn(conn, ses);
1791 	spin_unlock(&clp->cl_lock);
1792 }
1793 
1794 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1795 {
1796 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1797 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1798 }
1799 
1800 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1801 {
1802 	int ret;
1803 
1804 	nfsd4_hash_conn(conn, ses);
1805 	ret = nfsd4_register_conn(conn);
1806 	if (ret)
1807 		/* oops; xprt is already down: */
1808 		nfsd4_conn_lost(&conn->cn_xpt_user);
1809 	/* We may have gained or lost a callback channel: */
1810 	nfsd4_probe_callback_sync(ses->se_client);
1811 }
1812 
1813 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1814 {
1815 	u32 dir = NFS4_CDFC4_FORE;
1816 
1817 	if (cses->flags & SESSION4_BACK_CHAN)
1818 		dir |= NFS4_CDFC4_BACK;
1819 	return alloc_conn(rqstp, dir);
1820 }
1821 
1822 /* must be called under client_lock */
1823 static void nfsd4_del_conns(struct nfsd4_session *s)
1824 {
1825 	struct nfs4_client *clp = s->se_client;
1826 	struct nfsd4_conn *c;
1827 
1828 	spin_lock(&clp->cl_lock);
1829 	while (!list_empty(&s->se_conns)) {
1830 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1831 		list_del_init(&c->cn_persession);
1832 		spin_unlock(&clp->cl_lock);
1833 
1834 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1835 		free_conn(c);
1836 
1837 		spin_lock(&clp->cl_lock);
1838 	}
1839 	spin_unlock(&clp->cl_lock);
1840 }
1841 
1842 static void __free_session(struct nfsd4_session *ses)
1843 {
1844 	free_session_slots(ses);
1845 	kfree(ses);
1846 }
1847 
1848 static void free_session(struct nfsd4_session *ses)
1849 {
1850 	nfsd4_del_conns(ses);
1851 	nfsd4_put_drc_mem(&ses->se_fchannel);
1852 	__free_session(ses);
1853 }
1854 
1855 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1856 {
1857 	int idx;
1858 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1859 
1860 	new->se_client = clp;
1861 	gen_sessionid(new);
1862 
1863 	INIT_LIST_HEAD(&new->se_conns);
1864 
1865 	new->se_cb_seq_nr = 1;
1866 	new->se_flags = cses->flags;
1867 	new->se_cb_prog = cses->callback_prog;
1868 	new->se_cb_sec = cses->cb_sec;
1869 	atomic_set(&new->se_ref, 0);
1870 	idx = hash_sessionid(&new->se_sessionid);
1871 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1872 	spin_lock(&clp->cl_lock);
1873 	list_add(&new->se_perclnt, &clp->cl_sessions);
1874 	spin_unlock(&clp->cl_lock);
1875 
1876 	{
1877 		struct sockaddr *sa = svc_addr(rqstp);
1878 		/*
1879 		 * This is a little silly; with sessions there's no real
1880 		 * use for the callback address.  Use the peer address
1881 		 * as a reasonable default for now, but consider fixing
1882 		 * the rpc client not to require an address in the
1883 		 * future:
1884 		 */
1885 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1886 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1887 	}
1888 }
1889 
1890 /* caller must hold client_lock */
1891 static struct nfsd4_session *
1892 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1893 {
1894 	struct nfsd4_session *elem;
1895 	int idx;
1896 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1897 
1898 	lockdep_assert_held(&nn->client_lock);
1899 
1900 	dump_sessionid(__func__, sessionid);
1901 	idx = hash_sessionid(sessionid);
1902 	/* Search in the appropriate list */
1903 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1904 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1905 			    NFS4_MAX_SESSIONID_LEN)) {
1906 			return elem;
1907 		}
1908 	}
1909 
1910 	dprintk("%s: session not found\n", __func__);
1911 	return NULL;
1912 }
1913 
1914 static struct nfsd4_session *
1915 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1916 		__be32 *ret)
1917 {
1918 	struct nfsd4_session *session;
1919 	__be32 status = nfserr_badsession;
1920 
1921 	session = __find_in_sessionid_hashtbl(sessionid, net);
1922 	if (!session)
1923 		goto out;
1924 	status = nfsd4_get_session_locked(session);
1925 	if (status)
1926 		session = NULL;
1927 out:
1928 	*ret = status;
1929 	return session;
1930 }
1931 
1932 /* caller must hold client_lock */
1933 static void
1934 unhash_session(struct nfsd4_session *ses)
1935 {
1936 	struct nfs4_client *clp = ses->se_client;
1937 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1938 
1939 	lockdep_assert_held(&nn->client_lock);
1940 
1941 	list_del(&ses->se_hash);
1942 	spin_lock(&ses->se_client->cl_lock);
1943 	list_del(&ses->se_perclnt);
1944 	spin_unlock(&ses->se_client->cl_lock);
1945 }
1946 
1947 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1948 static int
1949 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1950 {
1951 	/*
1952 	 * We're assuming the clid was not given out from a boot
1953 	 * precisely 2^32 (about 136 years) before this one.  That seems
1954 	 * a safe assumption:
1955 	 */
1956 	if (clid->cl_boot == (u32)nn->boot_time)
1957 		return 0;
1958 	trace_nfsd_clid_stale(clid);
1959 	return 1;
1960 }
1961 
1962 /*
1963  * XXX Should we use a slab cache?
1964  * This type of memory management is somewhat inefficient, but we use it
1965  * anyway since SETCLIENTID is not a common operation.
1966  */
1967 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1968 {
1969 	struct nfs4_client *clp;
1970 	int i;
1971 
1972 	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1973 	if (clp == NULL)
1974 		return NULL;
1975 	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1976 	if (clp->cl_name.data == NULL)
1977 		goto err_no_name;
1978 	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1979 						 sizeof(struct list_head),
1980 						 GFP_KERNEL);
1981 	if (!clp->cl_ownerstr_hashtbl)
1982 		goto err_no_hashtbl;
1983 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1984 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1985 	INIT_LIST_HEAD(&clp->cl_sessions);
1986 	idr_init(&clp->cl_stateids);
1987 	atomic_set(&clp->cl_rpc_users, 0);
1988 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1989 	INIT_LIST_HEAD(&clp->cl_idhash);
1990 	INIT_LIST_HEAD(&clp->cl_openowners);
1991 	INIT_LIST_HEAD(&clp->cl_delegations);
1992 	INIT_LIST_HEAD(&clp->cl_lru);
1993 	INIT_LIST_HEAD(&clp->cl_revoked);
1994 #ifdef CONFIG_NFSD_PNFS
1995 	INIT_LIST_HEAD(&clp->cl_lo_states);
1996 #endif
1997 	INIT_LIST_HEAD(&clp->async_copies);
1998 	spin_lock_init(&clp->async_lock);
1999 	spin_lock_init(&clp->cl_lock);
2000 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2001 	return clp;
2002 err_no_hashtbl:
2003 	kfree(clp->cl_name.data);
2004 err_no_name:
2005 	kmem_cache_free(client_slab, clp);
2006 	return NULL;
2007 }
2008 
2009 static void __free_client(struct kref *k)
2010 {
2011 	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2012 	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2013 
2014 	free_svc_cred(&clp->cl_cred);
2015 	kfree(clp->cl_ownerstr_hashtbl);
2016 	kfree(clp->cl_name.data);
2017 	kfree(clp->cl_nii_domain.data);
2018 	kfree(clp->cl_nii_name.data);
2019 	idr_destroy(&clp->cl_stateids);
2020 	kmem_cache_free(client_slab, clp);
2021 }
2022 
2023 static void drop_client(struct nfs4_client *clp)
2024 {
2025 	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2026 }
2027 
2028 static void
2029 free_client(struct nfs4_client *clp)
2030 {
2031 	while (!list_empty(&clp->cl_sessions)) {
2032 		struct nfsd4_session *ses;
2033 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2034 				se_perclnt);
2035 		list_del(&ses->se_perclnt);
2036 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
2037 		free_session(ses);
2038 	}
2039 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2040 	if (clp->cl_nfsd_dentry) {
2041 		nfsd_client_rmdir(clp->cl_nfsd_dentry);
2042 		clp->cl_nfsd_dentry = NULL;
2043 		wake_up_all(&expiry_wq);
2044 	}
2045 	drop_client(clp);
2046 }
2047 
2048 /* must be called under the client_lock */
2049 static void
2050 unhash_client_locked(struct nfs4_client *clp)
2051 {
2052 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2053 	struct nfsd4_session *ses;
2054 
2055 	lockdep_assert_held(&nn->client_lock);
2056 
2057 	/* Mark the client as expired! */
2058 	clp->cl_time = 0;
2059 	/* Make it invisible */
2060 	if (!list_empty(&clp->cl_idhash)) {
2061 		list_del_init(&clp->cl_idhash);
2062 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2063 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2064 		else
2065 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2066 	}
2067 	list_del_init(&clp->cl_lru);
2068 	spin_lock(&clp->cl_lock);
2069 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2070 		list_del_init(&ses->se_hash);
2071 	spin_unlock(&clp->cl_lock);
2072 }
2073 
2074 static void
2075 unhash_client(struct nfs4_client *clp)
2076 {
2077 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2078 
2079 	spin_lock(&nn->client_lock);
2080 	unhash_client_locked(clp);
2081 	spin_unlock(&nn->client_lock);
2082 }
2083 
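/*
 * Refuse to expire a client that still has RPCs in flight referencing
 * it; nfserr_jukebox tells the caller to back off and retry.
 */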
2084 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2085 {
2086 	if (atomic_read(&clp->cl_rpc_users))
2087 		return nfserr_jukebox;
2088 	unhash_client_locked(clp);
2089 	return nfs_ok;
2090 }
2091 
2092 static void
2093 __destroy_client(struct nfs4_client *clp)
2094 {
2095 	int i;
2096 	struct nfs4_openowner *oo;
2097 	struct nfs4_delegation *dp;
2098 	struct list_head reaplist;
2099 
2100 	INIT_LIST_HEAD(&reaplist);
2101 	spin_lock(&state_lock);
2102 	while (!list_empty(&clp->cl_delegations)) {
2103 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2104 		WARN_ON(!unhash_delegation_locked(dp));
2105 		list_add(&dp->dl_recall_lru, &reaplist);
2106 	}
2107 	spin_unlock(&state_lock);
2108 	while (!list_empty(&reaplist)) {
2109 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2110 		list_del_init(&dp->dl_recall_lru);
2111 		destroy_unhashed_deleg(dp);
2112 	}
2113 	while (!list_empty(&clp->cl_revoked)) {
2114 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2115 		list_del_init(&dp->dl_recall_lru);
2116 		nfs4_put_stid(&dp->dl_stid);
2117 	}
2118 	while (!list_empty(&clp->cl_openowners)) {
2119 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2120 		nfs4_get_stateowner(&oo->oo_owner);
2121 		release_openowner(oo);
2122 	}
2123 	for (i = 0; i < OWNER_HASH_SIZE; i++) {
2124 		struct nfs4_stateowner *so, *tmp;
2125 
2126 		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2127 					 so_strhash) {
2128 			/* Should be no openowners at this point */
2129 			WARN_ON_ONCE(so->so_is_open_owner);
2130 			remove_blocked_locks(lockowner(so));
2131 		}
2132 	}
2133 	nfsd4_return_all_client_layouts(clp);
2134 	nfsd4_shutdown_copy(clp);
2135 	nfsd4_shutdown_callback(clp);
2136 	if (clp->cl_cb_conn.cb_xprt)
2137 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2138 	free_client(clp);
2139 	wake_up_all(&expiry_wq);
2140 }
2141 
2142 static void
2143 destroy_client(struct nfs4_client *clp)
2144 {
2145 	unhash_client(clp);
2146 	__destroy_client(clp);
2147 }
2148 
2149 static void inc_reclaim_complete(struct nfs4_client *clp)
2150 {
2151 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2152 
2153 	if (!nn->track_reclaim_completes)
2154 		return;
2155 	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2156 		return;
2157 	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2158 			nn->reclaim_str_hashtbl_size) {
2159 		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2160 				clp->net->ns.inum);
2161 		nfsd4_end_grace(nn);
2162 	}
2163 }
2164 
2165 static void expire_client(struct nfs4_client *clp)
2166 {
2167 	unhash_client(clp);
2168 	nfsd4_client_record_remove(clp);
2169 	__destroy_client(clp);
2170 }
2171 
2172 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2173 {
2174 	memcpy(target->cl_verifier.data, source->data,
2175 			sizeof(target->cl_verifier.data));
2176 }
2177 
2178 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2179 {
2180 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2181 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2182 }
2183 
2184 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2185 {
2186 	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2187 	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2188 								GFP_KERNEL);
2189 	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2190 	if ((source->cr_principal && !target->cr_principal) ||
2191 	    (source->cr_raw_principal && !target->cr_raw_principal) ||
2192 	    (source->cr_targ_princ && !target->cr_targ_princ))
2193 		return -ENOMEM;
2194 
2195 	target->cr_flavor = source->cr_flavor;
2196 	target->cr_uid = source->cr_uid;
2197 	target->cr_gid = source->cr_gid;
2198 	target->cr_group_info = source->cr_group_info;
2199 	get_group_info(target->cr_group_info);
2200 	target->cr_gss_mech = source->cr_gss_mech;
2201 	if (source->cr_gss_mech)
2202 		gss_mech_get(source->cr_gss_mech);
2203 	return 0;
2204 }
2205 
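/*
 * Order two opaque blobs by length first, then by content; used as the
 * comparator for the per-net client-name rbtrees below.
 */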
2206 static int
2207 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2208 {
2209 	if (o1->len < o2->len)
2210 		return -1;
2211 	if (o1->len > o2->len)
2212 		return 1;
2213 	return memcmp(o1->data, o2->data, o1->len);
2214 }
2215 
2216 static int
2217 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2218 {
2219 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2220 }
2221 
2222 static int
2223 same_clid(clientid_t *cl1, clientid_t *cl2)
2224 {
2225 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2226 }
2227 
2228 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2229 {
2230 	int i;
2231 
2232 	if (g1->ngroups != g2->ngroups)
2233 		return false;
2234 	for (i=0; i<g1->ngroups; i++)
2235 		if (!gid_eq(g1->gid[i], g2->gid[i]))
2236 			return false;
2237 	return true;
2238 }
2239 
2240 /*
2241  * RFC 3530 language requires clid_inuse be returned when the
2242  * "principal" associated with a requests differs from that previously
2243  * used.  We use uid, gid's, and gss principal string as our best
2244  * approximation.  We also don't want to allow non-gss use of a client
2245  * established using gss: in theory cr_principal should catch that
2246  * change, but in practice cr_principal can be null even in the gss case
2247  * since gssd doesn't always pass down a principal string.
2248  */
2249 static bool is_gss_cred(struct svc_cred *cr)
2250 {
2251 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
2252 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2253 }
2254 
2255 
2256 static bool
2257 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2258 {
2259 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2260 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2261 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2262 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2263 		return false;
2264 	/* XXX: check that cr_targ_princ fields match ? */
2265 	if (cr1->cr_principal == cr2->cr_principal)
2266 		return true;
2267 	if (!cr1->cr_principal || !cr2->cr_principal)
2268 		return false;
2269 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2270 }
2271 
2272 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2273 {
2274 	struct svc_cred *cr = &rqstp->rq_cred;
2275 	u32 service;
2276 
2277 	if (!cr->cr_gss_mech)
2278 		return false;
2279 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2280 	return service == RPC_GSS_SVC_INTEGRITY ||
2281 	       service == RPC_GSS_SVC_PRIVACY;
2282 }
2283 
2284 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2285 {
2286 	struct svc_cred *cr = &rqstp->rq_cred;
2287 
2288 	if (!cl->cl_mach_cred)
2289 		return true;
2290 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2291 		return false;
2292 	if (!svc_rqst_integrity_protected(rqstp))
2293 		return false;
2294 	if (cl->cl_cred.cr_raw_principal)
2295 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2296 						cr->cr_raw_principal);
2297 	if (!cr->cr_principal)
2298 		return false;
2299 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2300 }
2301 
2302 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2303 {
2304 	__be32 verf[2];
2305 
2306 	/*
2307 	 * This is opaque to client, so no need to byte-swap. Use
2308 	 * __force to keep sparse happy
2309 	 */
2310 	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2311 	verf[1] = (__force __be32)nn->clverifier_counter++;
2312 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2313 }
2314 
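/*
 * A clientid is the server boot time plus a per-net counter, which is
 * what allows STALE_CLIENTID() above to reject ids handed out by an
 * earlier server instance.
 */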
2315 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2316 {
2317 	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2318 	clp->cl_clientid.cl_id = nn->clientid_counter++;
2319 	gen_confirm(clp, nn);
2320 }
2321 
2322 static struct nfs4_stid *
2323 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2324 {
2325 	struct nfs4_stid *ret;
2326 
2327 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2328 	if (!ret || !ret->sc_type)
2329 		return NULL;
2330 	return ret;
2331 }
2332 
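/*
 * Look up a stateid and check its type against @typemask; on success
 * the stid's refcount has been bumped and the caller must drop it with
 * nfs4_put_stid().
 */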
2333 static struct nfs4_stid *
2334 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2335 {
2336 	struct nfs4_stid *s;
2337 
2338 	spin_lock(&cl->cl_lock);
2339 	s = find_stateid_locked(cl, t);
2340 	if (s != NULL) {
2341 		if (typemask & s->sc_type)
2342 			refcount_inc(&s->sc_count);
2343 		else
2344 			s = NULL;
2345 	}
2346 	spin_unlock(&cl->cl_lock);
2347 	return s;
2348 }
2349 
2350 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2351 {
2352 	struct nfsdfs_client *nc;
2353 	nc = get_nfsdfs_client(inode);
2354 	if (!nc)
2355 		return NULL;
2356 	return container_of(nc, struct nfs4_client, cl_nfsdfs);
2357 }
2358 
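/*
 * Emit @data as a double-quoted string; quotes, backslashes and
 * non-printable bytes are hex-escaped.
 */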
2359 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2360 {
2361 	seq_printf(m, "\"");
2362 	seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2363 	seq_printf(m, "\"");
2364 }
2365 
2366 static const char *cb_state2str(int state)
2367 {
2368 	switch (state) {
2369 	case NFSD4_CB_UP:
2370 		return "UP";
2371 	case NFSD4_CB_UNKNOWN:
2372 		return "UNKNOWN";
2373 	case NFSD4_CB_DOWN:
2374 		return "DOWN";
2375 	case NFSD4_CB_FAULT:
2376 		return "FAULT";
2377 	}
2378 	return "UNDEFINED";
2379 }
2380 
2381 static int client_info_show(struct seq_file *m, void *v)
2382 {
2383 	struct inode *inode = m->private;
2384 	struct nfs4_client *clp;
2385 	u64 clid;
2386 
2387 	clp = get_nfsdfs_clp(inode);
2388 	if (!clp)
2389 		return -ENXIO;
2390 	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2391 	seq_printf(m, "clientid: 0x%llx\n", clid);
2392 	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2393 	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2394 		seq_puts(m, "status: confirmed\n");
2395 	else
2396 		seq_puts(m, "status: unconfirmed\n");
2397 	seq_printf(m, "name: ");
2398 	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2399 	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2400 	if (clp->cl_nii_domain.data) {
2401 		seq_printf(m, "Implementation domain: ");
2402 		seq_quote_mem(m, clp->cl_nii_domain.data,
2403 					clp->cl_nii_domain.len);
2404 		seq_printf(m, "\nImplementation name: ");
2405 		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2406 		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2407 			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2408 	}
2409 	seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2410 	seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2411 	drop_client(clp);
2412 
2413 	return 0;
2414 }
2415 
2416 static int client_info_open(struct inode *inode, struct file *file)
2417 {
2418 	return single_open(file, client_info_show, inode);
2419 }
2420 
2421 static const struct file_operations client_info_fops = {
2422 	.open		= client_info_open,
2423 	.read		= seq_read,
2424 	.llseek		= seq_lseek,
2425 	.release	= single_release,
2426 };
2427 
2428 static void *states_start(struct seq_file *s, loff_t *pos)
2429 	__acquires(&clp->cl_lock)
2430 {
2431 	struct nfs4_client *clp = s->private;
2432 	unsigned long id = *pos;
2433 	void *ret;
2434 
2435 	spin_lock(&clp->cl_lock);
2436 	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2437 	*pos = id;
2438 	return ret;
2439 }
2440 
2441 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2442 {
2443 	struct nfs4_client *clp = s->private;
2444 	unsigned long id;
2445 	void *ret;
2446 
2447 	id = *pos;
2448 	id++;
2449 	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2450 	*pos = id;
2451 	return ret;
2452 }
2453 
2454 static void states_stop(struct seq_file *s, void *v)
2455 	__releases(&clp->cl_lock)
2456 {
2457 	struct nfs4_client *clp = s->private;
2458 
2459 	spin_unlock(&clp->cl_lock);
2460 }
2461 
2462 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2463 {
2464 	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2465 }
2466 
2467 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2468 {
2469 	struct inode *inode = f->nf_inode;
2470 
2471 	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2472 					MAJOR(inode->i_sb->s_dev),
2473 					MINOR(inode->i_sb->s_dev),
2474 					inode->i_ino);
2475 }
2476 
2477 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2478 {
2479 	seq_printf(s, "owner: ");
2480 	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2481 }
2482 
2483 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2484 {
2485 	seq_printf(s, "0x%.8x", stid->si_generation);
2486 	seq_printf(s, "%12phN", &stid->si_opaque);
2487 }
2488 
2489 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2490 {
2491 	struct nfs4_ol_stateid *ols;
2492 	struct nfs4_file *nf;
2493 	struct nfsd_file *file;
2494 	struct nfs4_stateowner *oo;
2495 	unsigned int access, deny;
2496 
2497 	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2498 		return 0; /* XXX: or SEQ_SKIP? */
2499 	ols = openlockstateid(st);
2500 	oo = ols->st_stateowner;
2501 	nf = st->sc_file;
2502 	file = find_any_file(nf);
2503 	if (!file)
2504 		return 0;
2505 
2506 	seq_printf(s, "- ");
2507 	nfs4_show_stateid(s, &st->sc_stateid);
2508 	seq_printf(s, ": { type: open, ");
2509 
2510 	access = bmap_to_share_mode(ols->st_access_bmap);
2511 	deny   = bmap_to_share_mode(ols->st_deny_bmap);
2512 
2513 	seq_printf(s, "access: %s%s, ",
2514 		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2515 		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2516 	seq_printf(s, "deny: %s%s, ",
2517 		deny & NFS4_SHARE_DENY_READ ? "r" : "-",
2518 		deny & NFS4_SHARE_DENY_WRITE ? "w" : "-");
2519 
2520 	nfs4_show_superblock(s, file);
2521 	seq_printf(s, ", ");
2522 	nfs4_show_fname(s, file);
2523 	seq_printf(s, ", ");
2524 	nfs4_show_owner(s, oo);
2525 	seq_printf(s, " }\n");
2526 	nfsd_file_put(file);
2527 
2528 	return 0;
2529 }
2530 
2531 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2532 {
2533 	struct nfs4_ol_stateid *ols;
2534 	struct nfs4_file *nf;
2535 	struct nfsd_file *file;
2536 	struct nfs4_stateowner *oo;
2537 
2538 	ols = openlockstateid(st);
2539 	oo = ols->st_stateowner;
2540 	nf = st->sc_file;
2541 	file = find_any_file(nf);
2542 	if (!file)
2543 		return 0;
2544 
2545 	seq_printf(s, "- ");
2546 	nfs4_show_stateid(s, &st->sc_stateid);
2547 	seq_printf(s, ": { type: lock, ");
2548 
2549 	/*
2550 	 * Note: a lock stateid isn't really the same thing as a lock,
2551 	 * it's the locking state held by one owner on a file, and there
2552 	 * may be multiple (or no) lock ranges associated with it.
2553 	 * (The same is true of open stateids.)
2554 	 */
2555 
2556 	nfs4_show_superblock(s, file);
2557 	/* XXX: open stateid? */
2558 	seq_printf(s, ", ");
2559 	nfs4_show_fname(s, file);
2560 	seq_printf(s, ", ");
2561 	nfs4_show_owner(s, oo);
2562 	seq_printf(s, " }\n");
2563 	nfsd_file_put(file);
2564 
2565 	return 0;
2566 }
2567 
2568 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2569 {
2570 	struct nfs4_delegation *ds;
2571 	struct nfs4_file *nf;
2572 	struct nfsd_file *file;
2573 
2574 	ds = delegstateid(st);
2575 	nf = st->sc_file;
2576 	file = find_deleg_file(nf);
2577 	if (!file)
2578 		return 0;
2579 
2580 	seq_printf(s, "- ");
2581 	nfs4_show_stateid(s, &st->sc_stateid);
2582 	seq_printf(s, ": { type: deleg, ");
2583 
2584 	/* Kinda dead code as long as we only support read delegs: */
2585 	seq_printf(s, "access: %s, ",
2586 		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2587 
2588 	/* XXX: lease time, whether it's being recalled. */
2589 
2590 	nfs4_show_superblock(s, file);
2591 	seq_printf(s, ", ");
2592 	nfs4_show_fname(s, file);
2593 	seq_printf(s, " }\n");
2594 	nfsd_file_put(file);
2595 
2596 	return 0;
2597 }
2598 
2599 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2600 {
2601 	struct nfs4_layout_stateid *ls;
2602 	struct nfsd_file *file;
2603 
2604 	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2605 	file = ls->ls_file;
2606 
2607 	seq_printf(s, "- ");
2608 	nfs4_show_stateid(s, &st->sc_stateid);
2609 	seq_printf(s, ": { type: layout, ");
2610 
2611 	/* XXX: What else would be useful? */
2612 
2613 	nfs4_show_superblock(s, file);
2614 	seq_printf(s, ", ");
2615 	nfs4_show_fname(s, file);
2616 	seq_printf(s, " }\n");
2617 
2618 	return 0;
2619 }
2620 
2621 static int states_show(struct seq_file *s, void *v)
2622 {
2623 	struct nfs4_stid *st = v;
2624 
2625 	switch (st->sc_type) {
2626 	case NFS4_OPEN_STID:
2627 		return nfs4_show_open(s, st);
2628 	case NFS4_LOCK_STID:
2629 		return nfs4_show_lock(s, st);
2630 	case NFS4_DELEG_STID:
2631 		return nfs4_show_deleg(s, st);
2632 	case NFS4_LAYOUT_STID:
2633 		return nfs4_show_layout(s, st);
2634 	default:
2635 		return 0; /* XXX: or SEQ_SKIP? */
2636 	}
2637 	/* XXX: copy stateids? */
2638 }
2639 
2640 static const struct seq_operations states_seq_ops = {
2641 	.start = states_start,
2642 	.next = states_next,
2643 	.stop = states_stop,
2644 	.show = states_show
2645 };
2646 
2647 static int client_states_open(struct inode *inode, struct file *file)
2648 {
2649 	struct seq_file *s;
2650 	struct nfs4_client *clp;
2651 	int ret;
2652 
2653 	clp = get_nfsdfs_clp(inode);
2654 	if (!clp)
2655 		return -ENXIO;
2656 
2657 	ret = seq_open(file, &states_seq_ops);
2658 	if (ret)
2659 		return ret;
2660 	s = file->private_data;
2661 	s->private = clp;
2662 	return 0;
2663 }
2664 
2665 static int client_opens_release(struct inode *inode, struct file *file)
2666 {
2667 	struct seq_file *m = file->private_data;
2668 	struct nfs4_client *clp = m->private;
2669 
2670 	/* XXX: alternatively, we could get/drop in seq start/stop */
2671 	drop_client(clp);
2672 	return 0;
2673 }
2674 
2675 static const struct file_operations client_states_fops = {
2676 	.open		= client_states_open,
2677 	.read		= seq_read,
2678 	.llseek		= seq_lseek,
2679 	.release	= client_opens_release,
2680 };
2681 
2682 /*
2683  * Normally we refuse to destroy clients that are in use, but here the
2684  * administrator is telling us to just do it.  We also want to wait
2685  * so the caller has a guarantee that the client's locks are gone by
2686  * the time the write returns:
2687  */
2688 static void force_expire_client(struct nfs4_client *clp)
2689 {
2690 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2691 	bool already_expired;
2692 
2693 	trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2694 
2695 	spin_lock(&nn->client_lock);
2696 	clp->cl_time = 0;
2697 	spin_unlock(&nn->client_lock);
2698 
2699 	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2700 	spin_lock(&nn->client_lock);
2701 	already_expired = list_empty(&clp->cl_lru);
2702 	if (!already_expired)
2703 		unhash_client_locked(clp);
2704 	spin_unlock(&nn->client_lock);
2705 
2706 	if (!already_expired)
2707 		expire_client(clp);
2708 	else
2709 		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2710 }
2711 
2712 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2713 				   size_t size, loff_t *pos)
2714 {
2715 	char *data;
2716 	struct nfs4_client *clp;
2717 
2718 	data = simple_transaction_get(file, buf, size);
2719 	if (IS_ERR(data))
2720 		return PTR_ERR(data);
2721 	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2722 		return -EINVAL;
2723 	clp = get_nfsdfs_clp(file_inode(file));
2724 	if (!clp)
2725 		return -ENXIO;
2726 	force_expire_client(clp);
2727 	drop_client(clp);
2728 	return 7;
2729 }
2730 
2731 static const struct file_operations client_ctl_fops = {
2732 	.write		= client_ctl_write,
2733 	.release	= simple_transaction_release,
2734 };
2735 
2736 static const struct tree_descr client_files[] = {
2737 	[0] = {"info", &client_info_fops, S_IRUSR},
2738 	[1] = {"states", &client_states_fops, S_IRUSR},
2739 	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
2740 	[3] = {""},
2741 };
2742 
2743 static struct nfs4_client *create_client(struct xdr_netobj name,
2744 		struct svc_rqst *rqstp, nfs4_verifier *verf)
2745 {
2746 	struct nfs4_client *clp;
2747 	struct sockaddr *sa = svc_addr(rqstp);
2748 	int ret;
2749 	struct net *net = SVC_NET(rqstp);
2750 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2751 	struct dentry *dentries[ARRAY_SIZE(client_files)];
2752 
2753 	clp = alloc_client(name);
2754 	if (clp == NULL)
2755 		return NULL;
2756 
2757 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2758 	if (ret) {
2759 		free_client(clp);
2760 		return NULL;
2761 	}
2762 	gen_clid(clp, nn);
2763 	kref_init(&clp->cl_nfsdfs.cl_ref);
2764 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2765 	clp->cl_time = ktime_get_boottime_seconds();
2766 	clear_bit(0, &clp->cl_cb_slot_busy);
2767 	copy_verf(clp, verf);
2768 	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2769 	clp->cl_cb_session = NULL;
2770 	clp->net = net;
2771 	clp->cl_nfsd_dentry = nfsd_client_mkdir(
2772 		nn, &clp->cl_nfsdfs,
2773 		clp->cl_clientid.cl_id - nn->clientid_base,
2774 		client_files, dentries);
2775 	clp->cl_nfsd_info_dentry = dentries[0];
2776 	if (!clp->cl_nfsd_dentry) {
2777 		free_client(clp);
2778 		return NULL;
2779 	}
2780 	return clp;
2781 }
2782 
2783 static void
2784 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2785 {
2786 	struct rb_node **new = &(root->rb_node), *parent = NULL;
2787 	struct nfs4_client *clp;
2788 
2789 	while (*new) {
2790 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2791 		parent = *new;
2792 
2793 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2794 			new = &((*new)->rb_left);
2795 		else
2796 			new = &((*new)->rb_right);
2797 	}
2798 
2799 	rb_link_node(&new_clp->cl_namenode, parent, new);
2800 	rb_insert_color(&new_clp->cl_namenode, root);
2801 }
2802 
2803 static struct nfs4_client *
2804 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2805 {
2806 	int cmp;
2807 	struct rb_node *node = root->rb_node;
2808 	struct nfs4_client *clp;
2809 
2810 	while (node) {
2811 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2812 		cmp = compare_blob(&clp->cl_name, name);
2813 		if (cmp > 0)
2814 			node = node->rb_left;
2815 		else if (cmp < 0)
2816 			node = node->rb_right;
2817 		else
2818 			return clp;
2819 	}
2820 	return NULL;
2821 }
2822 
2823 static void
2824 add_to_unconfirmed(struct nfs4_client *clp)
2825 {
2826 	unsigned int idhashval;
2827 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2828 
2829 	lockdep_assert_held(&nn->client_lock);
2830 
2831 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2832 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2833 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2834 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2835 	renew_client_locked(clp);
2836 }
2837 
2838 static void
2839 move_to_confirmed(struct nfs4_client *clp)
2840 {
2841 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2842 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2843 
2844 	lockdep_assert_held(&nn->client_lock);
2845 
2846 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2847 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2848 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
2849 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2850 	trace_nfsd_clid_confirmed(&clp->cl_clientid);
2851 	renew_client_locked(clp);
2852 }
2853 
2854 static struct nfs4_client *
2855 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2856 {
2857 	struct nfs4_client *clp;
2858 	unsigned int idhashval = clientid_hashval(clid->cl_id);
2859 
2860 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2861 		if (same_clid(&clp->cl_clientid, clid)) {
2862 			if ((bool)clp->cl_minorversion != sessions)
2863 				return NULL;
2864 			renew_client_locked(clp);
2865 			return clp;
2866 		}
2867 	}
2868 	return NULL;
2869 }
2870 
2871 static struct nfs4_client *
2872 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2873 {
2874 	struct list_head *tbl = nn->conf_id_hashtbl;
2875 
2876 	lockdep_assert_held(&nn->client_lock);
2877 	return find_client_in_id_table(tbl, clid, sessions);
2878 }
2879 
2880 static struct nfs4_client *
2881 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2882 {
2883 	struct list_head *tbl = nn->unconf_id_hashtbl;
2884 
2885 	lockdep_assert_held(&nn->client_lock);
2886 	return find_client_in_id_table(tbl, clid, sessions);
2887 }
2888 
2889 static bool clp_used_exchangeid(struct nfs4_client *clp)
2890 {
2891 	return clp->cl_exchange_flags != 0;
2892 }
2893 
2894 static struct nfs4_client *
2895 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2896 {
2897 	lockdep_assert_held(&nn->client_lock);
2898 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
2899 }
2900 
2901 static struct nfs4_client *
2902 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2903 {
2904 	lockdep_assert_held(&nn->client_lock);
2905 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2906 }
2907 
2908 static void
2909 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2910 {
2911 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2912 	struct sockaddr	*sa = svc_addr(rqstp);
2913 	u32 scopeid = rpc_get_scope_id(sa);
2914 	unsigned short expected_family;
2915 
2916 	/* Currently, we only support tcp and tcp6 for the callback channel */
2917 	if (se->se_callback_netid_len == 3 &&
2918 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2919 		expected_family = AF_INET;
2920 	else if (se->se_callback_netid_len == 4 &&
2921 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2922 		expected_family = AF_INET6;
2923 	else
2924 		goto out_err;
2925 
2926 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2927 					    se->se_callback_addr_len,
2928 					    (struct sockaddr *)&conn->cb_addr,
2929 					    sizeof(conn->cb_addr));
2930 
2931 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2932 		goto out_err;
2933 
2934 	if (conn->cb_addr.ss_family == AF_INET6)
2935 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2936 
2937 	conn->cb_prog = se->se_callback_prog;
2938 	conn->cb_ident = se->se_callback_ident;
2939 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2940 	trace_nfsd_cb_args(clp, conn);
2941 	return;
2942 out_err:
2943 	conn->cb_addr.ss_family = AF_UNSPEC;
2944 	conn->cb_addrlen = 0;
2945 	trace_nfsd_cb_nodelegs(clp);
2946 	return;
2947 }
2948 
2949 /*
2950  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2951  */
2952 static void
2953 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2954 {
2955 	struct xdr_buf *buf = resp->xdr->buf;
2956 	struct nfsd4_slot *slot = resp->cstate.slot;
2957 	unsigned int base;
2958 
2959 	dprintk("--> %s slot %p\n", __func__, slot);
2960 
2961 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2962 	slot->sl_opcnt = resp->opcnt;
2963 	slot->sl_status = resp->cstate.status;
2964 	free_svc_cred(&slot->sl_cred);
2965 	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2966 
2967 	if (!nfsd4_cache_this(resp)) {
2968 		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2969 		return;
2970 	}
2971 	slot->sl_flags |= NFSD4_SLOT_CACHED;
2972 
2973 	base = resp->cstate.data_offset;
2974 	slot->sl_datalen = buf->len - base;
2975 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2976 		WARN(1, "%s: sessions DRC could not cache compound\n",
2977 		     __func__);
2978 	return;
2979 }
2980 
2981 /*
2982  * Encode the replay sequence operation from the slot values.
2983  * If cachethis is FALSE, encode the uncached rep error on the next
2984  * operation, which sets resp->p and increments resp->opcnt for
2985  * nfs4svc_encode_compoundres.
2986  *
2987  */
2988 static __be32
2989 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2990 			  struct nfsd4_compoundres *resp)
2991 {
2992 	struct nfsd4_op *op;
2993 	struct nfsd4_slot *slot = resp->cstate.slot;
2994 
2995 	/* Encode the replayed sequence operation */
2996 	op = &args->ops[resp->opcnt - 1];
2997 	nfsd4_encode_operation(resp, op);
2998 
2999 	if (slot->sl_flags & NFSD4_SLOT_CACHED)
3000 		return op->status;
3001 	if (args->opcnt == 1) {
3002 		/*
3003 		 * The original operation wasn't a solo sequence--we
3004 		 * always cache those--so this retry must not match the
3005 		 * original:
3006 		 */
3007 		op->status = nfserr_seq_false_retry;
3008 	} else {
3009 		op = &args->ops[resp->opcnt++];
3010 		op->status = nfserr_retry_uncached_rep;
3011 		nfsd4_encode_operation(resp, op);
3012 	}
3013 	return op->status;
3014 }
3015 
3016 /*
3017  * The sequence operation is not cached because we can use the slot and
3018  * session values.
3019  */
3020 static __be32
3021 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3022 			 struct nfsd4_sequence *seq)
3023 {
3024 	struct nfsd4_slot *slot = resp->cstate.slot;
3025 	struct xdr_stream *xdr = resp->xdr;
3026 	__be32 *p;
3027 	__be32 status;
3028 
3029 	dprintk("--> %s slot %p\n", __func__, slot);
3030 
3031 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3032 	if (status)
3033 		return status;
3034 
3035 	p = xdr_reserve_space(xdr, slot->sl_datalen);
3036 	if (!p) {
3037 		WARN_ON_ONCE(1);
3038 		return nfserr_serverfault;
3039 	}
3040 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3041 	xdr_commit_encode(xdr);
3042 
3043 	resp->opcnt = slot->sl_opcnt;
3044 	return slot->sl_status;
3045 }
3046 
3047 /*
3048  * Set the exchange_id flags returned by the server.
3049  */
3050 static void
3051 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3052 {
3053 #ifdef CONFIG_NFSD_PNFS
3054 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3055 #else
3056 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3057 #endif
3058 
3059 	/* Referrals are supported, Migration is not. */
3060 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3061 
3062 	/* set the wire flags to return to client. */
3063 	clid->flags = new->cl_exchange_flags;
3064 }
3065 
3066 static bool client_has_openowners(struct nfs4_client *clp)
3067 {
3068 	struct nfs4_openowner *oo;
3069 
3070 	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3071 		if (!list_empty(&oo->oo_owner.so_stateids))
3072 			return true;
3073 	}
3074 	return false;
3075 }
3076 
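/*
 * Does the client still hold any state (opens, delegations, layouts,
 * sessions or async copies) that would be lost by destroying it?
 */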
3077 static bool client_has_state(struct nfs4_client *clp)
3078 {
3079 	return client_has_openowners(clp)
3080 #ifdef CONFIG_NFSD_PNFS
3081 		|| !list_empty(&clp->cl_lo_states)
3082 #endif
3083 		|| !list_empty(&clp->cl_delegations)
3084 		|| !list_empty(&clp->cl_sessions)
3085 		|| !list_empty(&clp->async_copies);
3086 }
3087 
3088 static __be32 copy_impl_id(struct nfs4_client *clp,
3089 				struct nfsd4_exchange_id *exid)
3090 {
3091 	if (!exid->nii_domain.data)
3092 		return 0;
3093 	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3094 	if (!clp->cl_nii_domain.data)
3095 		return nfserr_jukebox;
3096 	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3097 	if (!clp->cl_nii_name.data)
3098 		return nfserr_jukebox;
3099 	clp->cl_nii_time = exid->nii_time;
3100 	return 0;
3101 }
3102 
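/*
 * EXCHANGE_ID: create a fresh unconfirmed client, then work out under
 * the client_lock whether it retries, updates or replaces an existing
 * client of the same name (the numbered cases from RFC 5661 section
 * 18.35.4 noted in the body below).
 */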
3103 __be32
3104 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3105 		union nfsd4_op_u *u)
3106 {
3107 	struct nfsd4_exchange_id *exid = &u->exchange_id;
3108 	struct nfs4_client *conf, *new;
3109 	struct nfs4_client *unconf = NULL;
3110 	__be32 status;
3111 	char			addr_str[INET6_ADDRSTRLEN];
3112 	nfs4_verifier		verf = exid->verifier;
3113 	struct sockaddr		*sa = svc_addr(rqstp);
3114 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3115 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3116 
3117 	rpc_ntop(sa, addr_str, sizeof(addr_str));
3118 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3119 		"ip_addr=%s flags %x, spa_how %u\n",
3120 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
3121 		addr_str, exid->flags, exid->spa_how);
3122 
3123 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3124 		return nfserr_inval;
3125 
3126 	new = create_client(exid->clname, rqstp, &verf);
3127 	if (new == NULL)
3128 		return nfserr_jukebox;
3129 	status = copy_impl_id(new, exid);
3130 	if (status)
3131 		goto out_nolock;
3132 
3133 	switch (exid->spa_how) {
3134 	case SP4_MACH_CRED:
3135 		exid->spo_must_enforce[0] = 0;
3136 		exid->spo_must_enforce[1] = (
3137 			1 << (OP_BIND_CONN_TO_SESSION - 32) |
3138 			1 << (OP_EXCHANGE_ID - 32) |
3139 			1 << (OP_CREATE_SESSION - 32) |
3140 			1 << (OP_DESTROY_SESSION - 32) |
3141 			1 << (OP_DESTROY_CLIENTID - 32));
3142 
3143 		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3144 					1 << (OP_OPEN_DOWNGRADE) |
3145 					1 << (OP_LOCKU) |
3146 					1 << (OP_DELEGRETURN));
3147 
3148 		exid->spo_must_allow[1] &= (
3149 					1 << (OP_TEST_STATEID - 32) |
3150 					1 << (OP_FREE_STATEID - 32));
3151 		if (!svc_rqst_integrity_protected(rqstp)) {
3152 			status = nfserr_inval;
3153 			goto out_nolock;
3154 		}
3155 		/*
3156 		 * Sometimes userspace doesn't give us a principal.
3157 		 * Which is a bug, really.  Anyway, we can't enforce
3158 		 * MACH_CRED in that case, better to give up now:
3159 		 */
3160 		if (!new->cl_cred.cr_principal &&
3161 					!new->cl_cred.cr_raw_principal) {
3162 			status = nfserr_serverfault;
3163 			goto out_nolock;
3164 		}
3165 		new->cl_mach_cred = true;
3166 		break;
3167 	case SP4_NONE:
3168 		break;
3169 	default:				/* checked by xdr code */
3170 		WARN_ON_ONCE(1);
3171 		fallthrough;
3172 	case SP4_SSV:
3173 		status = nfserr_encr_alg_unsupp;
3174 		goto out_nolock;
3175 	}
3176 
3177 	/* Cases below refer to rfc 5661 section 18.35.4: */
3178 	spin_lock(&nn->client_lock);
3179 	conf = find_confirmed_client_by_name(&exid->clname, nn);
3180 	if (conf) {
3181 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3182 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3183 
3184 		if (update) {
3185 			if (!clp_used_exchangeid(conf)) { /* buggy client */
3186 				status = nfserr_inval;
3187 				goto out;
3188 			}
3189 			if (!nfsd4_mach_creds_match(conf, rqstp)) {
3190 				status = nfserr_wrong_cred;
3191 				goto out;
3192 			}
3193 			if (!creds_match) { /* case 9 */
3194 				status = nfserr_perm;
3195 				goto out;
3196 			}
3197 			if (!verfs_match) { /* case 8 */
3198 				status = nfserr_not_same;
3199 				goto out;
3200 			}
3201 			/* case 6 */
3202 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3203 			trace_nfsd_clid_confirmed_r(conf);
3204 			goto out_copy;
3205 		}
3206 		if (!creds_match) { /* case 3 */
3207 			if (client_has_state(conf)) {
3208 				status = nfserr_clid_inuse;
3209 				trace_nfsd_clid_cred_mismatch(conf, rqstp);
3210 				goto out;
3211 			}
3212 			goto out_new;
3213 		}
3214 		if (verfs_match) { /* case 2 */
3215 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3216 			trace_nfsd_clid_confirmed_r(conf);
3217 			goto out_copy;
3218 		}
3219 		/* case 5, client reboot */
3220 		trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3221 		conf = NULL;
3222 		goto out_new;
3223 	}
3224 
3225 	if (update) { /* case 7 */
3226 		status = nfserr_noent;
3227 		goto out;
3228 	}
3229 
3230 	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3231 	if (unconf) /* case 4, possible retry or client restart */
3232 		unhash_client_locked(unconf);
3233 
3234 	/* case 1, new owner ID */
3235 	trace_nfsd_clid_fresh(new);
3236 
3237 out_new:
3238 	if (conf) {
3239 		status = mark_client_expired_locked(conf);
3240 		if (status)
3241 			goto out;
3242 		trace_nfsd_clid_replaced(&conf->cl_clientid);
3243 	}
3244 	new->cl_minorversion = cstate->minorversion;
3245 	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3246 	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3247 
3248 	add_to_unconfirmed(new);
3249 	swap(new, conf);
3250 out_copy:
3251 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3252 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
3253 
3254 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3255 	nfsd4_set_ex_flags(conf, exid);
3256 
3257 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3258 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3259 	status = nfs_ok;
3260 
3261 out:
3262 	spin_unlock(&nn->client_lock);
3263 out_nolock:
3264 	if (new)
3265 		expire_client(new);
3266 	if (unconf) {
3267 		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3268 		expire_client(unconf);
3269 	}
3270 	return status;
3271 }
3272 
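/*
 * Session slot seqid checks: a request carrying the slot's current
 * seqid while the slot is still in use is a retry we can't answer yet
 * (jukebox); seqid + 1 starts a new request; an exact match on an idle
 * slot is a replay served from the cache; anything else is misordered.
 */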
3273 static __be32
3274 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3275 {
3276 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3277 		slot_seqid);
3278 
3279 	/* The slot is in use, and no response has been sent. */
3280 	if (slot_inuse) {
3281 		if (seqid == slot_seqid)
3282 			return nfserr_jukebox;
3283 		else
3284 			return nfserr_seq_misordered;
3285 	}
3286 	/* Note unsigned 32-bit arithmetic handles wraparound: */
3287 	if (likely(seqid == slot_seqid + 1))
3288 		return nfs_ok;
3289 	if (seqid == slot_seqid)
3290 		return nfserr_replay_cache;
3291 	return nfserr_seq_misordered;
3292 }
3293 
3294 /*
3295  * Cache the create session result into the create session single DRC
3296  * slot cache by saving the xdr structure. sl_seqid has been set.
3297  * Do this for solo or embedded create session operations.
3298  */
3299 static void
3300 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3301 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
3302 {
3303 	slot->sl_status = nfserr;
3304 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3305 }
3306 
3307 static __be32
3308 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3309 			    struct nfsd4_clid_slot *slot)
3310 {
3311 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3312 	return slot->sl_status;
3313 }
3314 
3315 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
3316 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3317 			1 +	/* minimal tag: zero length, so just the length word */ \
3318 			3 +	/* version, opcount, opcode */ \
3319 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3320 				/* seqid, slotID, slotID, cache */ \
3321 			4 ) * sizeof(__be32))
3322 
3323 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3324 			2 +	/* verifier: AUTH_NULL, length 0 */\
3325 			1 +	/* status */ \
3326 			1 +	/* minimal tag: zero length, so just the length word */ \
3327 			3 +	/* opcount, opcode, opstatus*/ \
3328 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3329 				/* seqid, slotID, slotID, slotID, status */ \
3330 			5 ) * sizeof(__be32))
3331 
3332 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3333 {
3334 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3335 
3336 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3337 		return nfserr_toosmall;
3338 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3339 		return nfserr_toosmall;
3340 	ca->headerpadsz = 0;
3341 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3342 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3343 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3344 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3345 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3346 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3347 	/*
3348 	 * Note decreasing slot size below client's request may make it
3349 	 * difficult for client to function correctly, whereas
3350 	 * decreasing the number of slots will (just?) affect
3351 	 * performance.  When short on memory we therefore prefer to
3352 	 * decrease number of slots instead of their size.  Clients that
3353 	 * request larger slots than they need will get poor results.
3354 	 * Note that we always allow at least one slot, because our
3355 	 * accounting is soft and provides no guarantees either way.
3356 	 */
3357 	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3358 
3359 	return nfs_ok;
3360 }
3361 
3362 /*
3363  * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3364  * These are based on similar macros in linux/sunrpc/msg_prot.h .
3365  */
3366 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3367 	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3368 
3369 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3370 	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3371 
3372 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
3373 				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3374 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
3375 				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3376 				 sizeof(__be32))
3377 
3378 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3379 {
3380 	ca->headerpadsz = 0;
3381 
3382 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3383 		return nfserr_toosmall;
3384 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3385 		return nfserr_toosmall;
3386 	ca->maxresp_cached = 0;
3387 	if (ca->maxops < 2)
3388 		return nfserr_toosmall;
3389 
3390 	return nfs_ok;
3391 }
3392 
3393 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3394 {
3395 	switch (cbs->flavor) {
3396 	case RPC_AUTH_NULL:
3397 	case RPC_AUTH_UNIX:
3398 		return nfs_ok;
3399 	default:
3400 		/*
3401 		 * GSS case: the spec doesn't allow us to return this
3402 		 * error.  But it also doesn't allow us not to support
3403 		 * GSS.
3404 		 * I'd rather this fail hard than return some error the
3405 		 * client might think it can already handle:
3406 		 */
3407 		return nfserr_encr_alg_unsupp;
3408 	}
3409 }
3410 
3411 __be32
3412 nfsd4_create_session(struct svc_rqst *rqstp,
3413 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3414 {
3415 	struct nfsd4_create_session *cr_ses = &u->create_session;
3416 	struct sockaddr *sa = svc_addr(rqstp);
3417 	struct nfs4_client *conf, *unconf;
3418 	struct nfs4_client *old = NULL;
3419 	struct nfsd4_session *new;
3420 	struct nfsd4_conn *conn;
3421 	struct nfsd4_clid_slot *cs_slot = NULL;
3422 	__be32 status = 0;
3423 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3424 
3425 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3426 		return nfserr_inval;
3427 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3428 	if (status)
3429 		return status;
3430 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3431 	if (status)
3432 		return status;
3433 	status = check_backchannel_attrs(&cr_ses->back_channel);
3434 	if (status)
3435 		goto out_release_drc_mem;
3436 	status = nfserr_jukebox;
3437 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3438 	if (!new)
3439 		goto out_release_drc_mem;
3440 	conn = alloc_conn_from_crses(rqstp, cr_ses);
3441 	if (!conn)
3442 		goto out_free_session;
3443 
3444 	spin_lock(&nn->client_lock);
3445 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3446 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3447 	WARN_ON_ONCE(conf && unconf);
3448 
3449 	if (conf) {
3450 		status = nfserr_wrong_cred;
3451 		if (!nfsd4_mach_creds_match(conf, rqstp))
3452 			goto out_free_conn;
3453 		cs_slot = &conf->cl_cs_slot;
3454 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3455 		if (status) {
3456 			if (status == nfserr_replay_cache)
3457 				status = nfsd4_replay_create_session(cr_ses, cs_slot);
3458 			goto out_free_conn;
3459 		}
3460 	} else if (unconf) {
3461 		status = nfserr_clid_inuse;
3462 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3463 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3464 			trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3465 			goto out_free_conn;
3466 		}
3467 		status = nfserr_wrong_cred;
3468 		if (!nfsd4_mach_creds_match(unconf, rqstp))
3469 			goto out_free_conn;
3470 		cs_slot = &unconf->cl_cs_slot;
3471 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3472 		if (status) {
3473 			/* an unconfirmed replay returns misordered */
3474 			status = nfserr_seq_misordered;
3475 			goto out_free_conn;
3476 		}
3477 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3478 		if (old) {
3479 			status = mark_client_expired_locked(old);
3480 			if (status) {
3481 				old = NULL;
3482 				goto out_free_conn;
3483 			}
3484 			trace_nfsd_clid_replaced(&old->cl_clientid);
3485 		}
3486 		move_to_confirmed(unconf);
3487 		conf = unconf;
3488 	} else {
3489 		status = nfserr_stale_clientid;
3490 		goto out_free_conn;
3491 	}
3492 	status = nfs_ok;
3493 	/* Persistent sessions are not supported */
3494 	cr_ses->flags &= ~SESSION4_PERSIST;
3495 	/* Upshifting from TCP to RDMA is not supported */
3496 	cr_ses->flags &= ~SESSION4_RDMA;
3497 
3498 	init_session(rqstp, new, conf, cr_ses);
3499 	nfsd4_get_session_locked(new);
3500 
3501 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3502 	       NFS4_MAX_SESSIONID_LEN);
3503 	cs_slot->sl_seqid++;
3504 	cr_ses->seqid = cs_slot->sl_seqid;
3505 
3506 	/* cache solo and embedded create sessions under the client_lock */
3507 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
3508 	spin_unlock(&nn->client_lock);
3509 	if (conf == unconf)
3510 		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3511 	/* init connection and backchannel */
3512 	nfsd4_init_conn(rqstp, conn, new);
3513 	nfsd4_put_session(new);
3514 	if (old)
3515 		expire_client(old);
3516 	return status;
3517 out_free_conn:
3518 	spin_unlock(&nn->client_lock);
3519 	free_conn(conn);
3520 	if (old)
3521 		expire_client(old);
3522 out_free_session:
3523 	__free_session(new);
3524 out_release_drc_mem:
3525 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
3526 	return status;
3527 }
3528 
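/*
 * Normalize the requested BIND_CONN_TO_SESSION direction: the *_OR_BOTH
 * variants are granted both directions; any other value is rejected.
 */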
3529 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3530 {
3531 	switch (*dir) {
3532 	case NFS4_CDFC4_FORE:
3533 	case NFS4_CDFC4_BACK:
3534 		return nfs_ok;
3535 	case NFS4_CDFC4_FORE_OR_BOTH:
3536 	case NFS4_CDFC4_BACK_OR_BOTH:
3537 		*dir = NFS4_CDFC4_BOTH;
3538 		return nfs_ok;
3539 	}
3540 	return nfserr_inval;
3541 }
3542 
3543 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3544 		struct nfsd4_compound_state *cstate,
3545 		union nfsd4_op_u *u)
3546 {
3547 	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3548 	struct nfsd4_session *session = cstate->session;
3549 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3550 	__be32 status;
3551 
3552 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3553 	if (status)
3554 		return status;
3555 	spin_lock(&nn->client_lock);
3556 	session->se_cb_prog = bc->bc_cb_program;
3557 	session->se_cb_sec = bc->bc_cb_sec;
3558 	spin_unlock(&nn->client_lock);
3559 
3560 	nfsd4_probe_callback(session->se_client);
3561 
3562 	return nfs_ok;
3563 }
3564 
3565 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3566 {
3567 	struct nfsd4_conn *c;
3568 
3569 	list_for_each_entry(c, &s->se_conns, cn_persession) {
3570 		if (c->cn_xprt == xpt) {
3571 			return c;
3572 		}
3573 	}
3574 	return NULL;
3575 }
3576 
3577 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3578 		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3579 {
3580 	struct nfs4_client *clp = session->se_client;
3581 	struct svc_xprt *xpt = rqst->rq_xprt;
3582 	struct nfsd4_conn *c;
3583 	__be32 status;
3584 
3585 	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
3586 	spin_lock(&clp->cl_lock);
3587 	c = __nfsd4_find_conn(xpt, session);
3588 	if (!c)
3589 		status = nfserr_noent;
3590 	else if (req == c->cn_flags)
3591 		status = nfs_ok;
3592 	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3593 				c->cn_flags != NFS4_CDFC4_BACK)
3594 		status = nfs_ok;
3595 	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3596 				c->cn_flags != NFS4_CDFC4_FORE)
3597 		status = nfs_ok;
3598 	else
3599 		status = nfserr_inval;
3600 	spin_unlock(&clp->cl_lock);
3601 	if (status == nfs_ok && conn)
3602 		*conn = c;
3603 	return status;
3604 }
3605 
3606 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3607 		     struct nfsd4_compound_state *cstate,
3608 		     union nfsd4_op_u *u)
3609 {
3610 	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3611 	__be32 status;
3612 	struct nfsd4_conn *conn;
3613 	struct nfsd4_session *session;
3614 	struct net *net = SVC_NET(rqstp);
3615 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3616 
3617 	if (!nfsd4_last_compound_op(rqstp))
3618 		return nfserr_not_only_op;
3619 	spin_lock(&nn->client_lock);
3620 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3621 	spin_unlock(&nn->client_lock);
3622 	if (!session)
3623 		goto out_no_session;
3624 	status = nfserr_wrong_cred;
3625 	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3626 		goto out;
3627 	status = nfsd4_match_existing_connection(rqstp, session,
3628 			bcts->dir, &conn);
3629 	if (status == nfs_ok) {
3630 		if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3631 				bcts->dir == NFS4_CDFC4_BACK)
3632 			conn->cn_flags |= NFS4_CDFC4_BACK;
3633 		nfsd4_probe_callback(session->se_client);
3634 		goto out;
3635 	}
3636 	if (status == nfserr_inval)
3637 		goto out;
3638 	status = nfsd4_map_bcts_dir(&bcts->dir);
3639 	if (status)
3640 		goto out;
3641 	conn = alloc_conn(rqstp, bcts->dir);
3642 	status = nfserr_jukebox;
3643 	if (!conn)
3644 		goto out;
3645 	nfsd4_init_conn(rqstp, conn, session);
3646 	status = nfs_ok;
3647 out:
3648 	nfsd4_put_session(session);
3649 out_no_session:
3650 	return status;
3651 }
3652 
3653 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3654 {
3655 	if (!cstate->session)
3656 		return false;
3657 	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3658 }
3659 
3660 __be32
3661 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3662 		union nfsd4_op_u *u)
3663 {
3664 	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3665 	struct nfsd4_session *ses;
3666 	__be32 status;
3667 	int ref_held_by_me = 0;
3668 	struct net *net = SVC_NET(r);
3669 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3670 
3671 	status = nfserr_not_only_op;
3672 	if (nfsd4_compound_in_session(cstate, sessionid)) {
3673 		if (!nfsd4_last_compound_op(r))
3674 			goto out;
3675 		ref_held_by_me++;
3676 	}
3677 	dump_sessionid(__func__, sessionid);
3678 	spin_lock(&nn->client_lock);
3679 	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3680 	if (!ses)
3681 		goto out_client_lock;
3682 	status = nfserr_wrong_cred;
3683 	if (!nfsd4_mach_creds_match(ses->se_client, r))
3684 		goto out_put_session;
3685 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3686 	if (status)
3687 		goto out_put_session;
3688 	unhash_session(ses);
3689 	spin_unlock(&nn->client_lock);
3690 
3691 	nfsd4_probe_callback_sync(ses->se_client);
3692 
3693 	spin_lock(&nn->client_lock);
3694 	status = nfs_ok;
3695 out_put_session:
3696 	nfsd4_put_session_locked(ses);
3697 out_client_lock:
3698 	spin_unlock(&nn->client_lock);
3699 out:
3700 	return status;
3701 }
3702 
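/*
 * Check whether the connection a SEQUENCE request arrived on is already
 * bound to the session.  Ownership of @new passes to this function: it
 * is freed when the connection is already known, or when the client
 * demanded machine-credential binding (nfserr_conn_not_bound_to_session);
 * otherwise @new is hashed into the session and registered so we notice
 * if the transport goes away.
 */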
3703 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3704 {
3705 	struct nfs4_client *clp = ses->se_client;
3706 	struct nfsd4_conn *c;
3707 	__be32 status = nfs_ok;
3708 	int ret;
3709 
3710 	spin_lock(&clp->cl_lock);
3711 	c = __nfsd4_find_conn(new->cn_xprt, ses);
3712 	if (c)
3713 		goto out_free;
3714 	status = nfserr_conn_not_bound_to_session;
3715 	if (clp->cl_mach_cred)
3716 		goto out_free;
3717 	__nfsd4_hash_conn(new, ses);
3718 	spin_unlock(&clp->cl_lock);
3719 	ret = nfsd4_register_conn(new);
3720 	if (ret)
3721 		/* oops; xprt is already down: */
3722 		nfsd4_conn_lost(&new->cn_xpt_user);
3723 	return nfs_ok;
3724 out_free:
3725 	spin_unlock(&clp->cl_lock);
3726 	free_conn(new);
3727 	return status;
3728 }
3729 
3730 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3731 {
3732 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
3733 
3734 	return args->opcnt > session->se_fchannel.maxops;
3735 }
3736 
3737 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3738 				  struct nfsd4_session *session)
3739 {
3740 	struct xdr_buf *xb = &rqstp->rq_arg;
3741 
3742 	return xb->len > session->se_fchannel.maxreq_sz;
3743 }
3744 
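/*
 * A SEQUENCE whose seqid matches the slot's cached entry is only treated
 * as a genuine replay if it plausibly matches that entry: same cachethis
 * setting, an op count consistent with the cached reply, and the same
 * credential.  Anything else is reported as a false retry.
 */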
3745 static bool replay_matches_cache(struct svc_rqst *rqstp,
3746 		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3747 {
3748 	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3749 
3750 	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3751 	    (bool)seq->cachethis)
3752 		return false;
3753 	/*
3754 	 * If there's an error then the reply can have fewer ops than
3755 	 * the call.
3756 	 */
3757 	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3758 		return false;
3759 	/*
3760 	 * But if we cached a reply with *more* ops than the call you're
3761 	 * sending us now, then this new call is clearly not really a
3762 	 * replay of the old one:
3763 	 */
3764 	if (slot->sl_opcnt > argp->opcnt)
3765 		return false;
3766 	/* This is the only check explicitly called for by the spec: */
3767 	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3768 		return false;
3769 	/*
3770 	 * There may be more comparisons we could actually do, but the
3771 	 * spec doesn't require us to catch every case where the calls
3772 	 * don't match (that would require caching the call as well as
3773 	 * the reply), so we don't bother.
3774 	 */
3775 	return true;
3776 }
3777 
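/*
 * SEQUENCE (NFSv4.1): must be the first operation of its compound.
 * Looks up the session and slot, enforces the fore channel's maxops and
 * maxreq_sz limits, replays the cached reply for a retransmission,
 * binds the connection to the session if necessary, reserves reply
 * buffer space, and reports back-channel problems in seq->status_flags.
 */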
3778 __be32
3779 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3780 		union nfsd4_op_u *u)
3781 {
3782 	struct nfsd4_sequence *seq = &u->sequence;
3783 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3784 	struct xdr_stream *xdr = resp->xdr;
3785 	struct nfsd4_session *session;
3786 	struct nfs4_client *clp;
3787 	struct nfsd4_slot *slot;
3788 	struct nfsd4_conn *conn;
3789 	__be32 status;
3790 	int buflen;
3791 	struct net *net = SVC_NET(rqstp);
3792 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3793 
3794 	if (resp->opcnt != 1)
3795 		return nfserr_sequence_pos;
3796 
3797 	/*
3798 	 * Will be either used or freed by nfsd4_sequence_check_conn
3799 	 * below.
3800 	 */
3801 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3802 	if (!conn)
3803 		return nfserr_jukebox;
3804 
3805 	spin_lock(&nn->client_lock);
3806 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3807 	if (!session)
3808 		goto out_no_session;
3809 	clp = session->se_client;
3810 
3811 	status = nfserr_too_many_ops;
3812 	if (nfsd4_session_too_many_ops(rqstp, session))
3813 		goto out_put_session;
3814 
3815 	status = nfserr_req_too_big;
3816 	if (nfsd4_request_too_big(rqstp, session))
3817 		goto out_put_session;
3818 
3819 	status = nfserr_badslot;
3820 	if (seq->slotid >= session->se_fchannel.maxreqs)
3821 		goto out_put_session;
3822 
3823 	slot = session->se_slots[seq->slotid];
3824 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
3825 
3826 	/* We do not negotiate the number of slots yet, so set the
3827 	 * maxslots to the session maxreqs, which is used to encode
3828 	 * sr_highest_slotid and sr_target_slotid to maxslots */
3829 	seq->maxslots = session->se_fchannel.maxreqs;
3830 
3831 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3832 					slot->sl_flags & NFSD4_SLOT_INUSE);
3833 	if (status == nfserr_replay_cache) {
3834 		status = nfserr_seq_misordered;
3835 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3836 			goto out_put_session;
3837 		status = nfserr_seq_false_retry;
3838 		if (!replay_matches_cache(rqstp, seq, slot))
3839 			goto out_put_session;
3840 		cstate->slot = slot;
3841 		cstate->session = session;
3842 		cstate->clp = clp;
3843 		/* Return the cached reply status and set cstate->status
3844 		 * for nfsd4_proc_compound processing */
3845 		status = nfsd4_replay_cache_entry(resp, seq);
3846 		cstate->status = nfserr_replay_cache;
3847 		goto out;
3848 	}
3849 	if (status)
3850 		goto out_put_session;
3851 
3852 	status = nfsd4_sequence_check_conn(conn, session);
3853 	conn = NULL;
3854 	if (status)
3855 		goto out_put_session;
3856 
3857 	buflen = (seq->cachethis) ?
3858 			session->se_fchannel.maxresp_cached :
3859 			session->se_fchannel.maxresp_sz;
3860 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3861 				    nfserr_rep_too_big;
3862 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3863 		goto out_put_session;
3864 	svc_reserve(rqstp, buflen);
3865 
3866 	status = nfs_ok;
3867 	/* Success! bump slot seqid */
3868 	slot->sl_seqid = seq->seqid;
3869 	slot->sl_flags |= NFSD4_SLOT_INUSE;
3870 	if (seq->cachethis)
3871 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3872 	else
3873 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3874 
3875 	cstate->slot = slot;
3876 	cstate->session = session;
3877 	cstate->clp = clp;
3878 
3879 out:
3880 	switch (clp->cl_cb_state) {
3881 	case NFSD4_CB_DOWN:
3882 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3883 		break;
3884 	case NFSD4_CB_FAULT:
3885 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3886 		break;
3887 	default:
3888 		seq->status_flags = 0;
3889 	}
3890 	if (!list_empty(&clp->cl_revoked))
3891 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3892 out_no_session:
3893 	if (conn)
3894 		free_conn(conn);
3895 	spin_unlock(&nn->client_lock);
3896 	return status;
3897 out_put_session:
3898 	nfsd4_put_session_locked(session);
3899 	goto out_no_session;
3900 }
3901 
3902 void
3903 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3904 {
3905 	struct nfsd4_compound_state *cs = &resp->cstate;
3906 
3907 	if (nfsd4_has_session(cs)) {
3908 		if (cs->status != nfserr_replay_cache) {
3909 			nfsd4_store_cache_entry(resp);
3910 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3911 		}
3912 		/* Drop session reference that was taken in nfsd4_sequence() */
3913 		nfsd4_put_session(cs->session);
3914 	} else if (cs->clp)
3915 		put_client_renew(cs->clp);
3916 }
3917 
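/*
 * DESTROY_CLIENTID: a confirmed client can only be destroyed once it no
 * longer holds state (nfserr_clientid_busy otherwise), and only by a
 * principal whose machine credentials match; an unconfirmed client is
 * simply expired.
 */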
3918 __be32
3919 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3920 		struct nfsd4_compound_state *cstate,
3921 		union nfsd4_op_u *u)
3922 {
3923 	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3924 	struct nfs4_client *conf, *unconf;
3925 	struct nfs4_client *clp = NULL;
3926 	__be32 status = 0;
3927 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3928 
3929 	spin_lock(&nn->client_lock);
3930 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3931 	conf = find_confirmed_client(&dc->clientid, true, nn);
3932 	WARN_ON_ONCE(conf && unconf);
3933 
3934 	if (conf) {
3935 		if (client_has_state(conf)) {
3936 			status = nfserr_clientid_busy;
3937 			goto out;
3938 		}
3939 		status = mark_client_expired_locked(conf);
3940 		if (status)
3941 			goto out;
3942 		clp = conf;
3943 	} else if (unconf)
3944 		clp = unconf;
3945 	else {
3946 		status = nfserr_stale_clientid;
3947 		goto out;
3948 	}
3949 	if (!nfsd4_mach_creds_match(clp, rqstp)) {
3950 		clp = NULL;
3951 		status = nfserr_wrong_cred;
3952 		goto out;
3953 	}
3954 	trace_nfsd_clid_destroyed(&clp->cl_clientid);
3955 	unhash_client_locked(clp);
3956 out:
3957 	spin_unlock(&nn->client_lock);
3958 	if (clp)
3959 		expire_client(clp);
3960 	return status;
3961 }
3962 
3963 __be32
3964 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3965 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3966 {
3967 	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3968 	struct nfs4_client *clp = cstate->clp;
3969 	__be32 status = 0;
3970 
3971 	if (rc->rca_one_fs) {
3972 		if (!cstate->current_fh.fh_dentry)
3973 			return nfserr_nofilehandle;
3974 		/*
3975 		 * We don't take advantage of the rca_one_fs case.
3976 		 * That's OK, it's optional, we can safely ignore it.
3977 		 */
3978 		return nfs_ok;
3979 	}
3980 
3981 	status = nfserr_complete_already;
3982 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
3983 		goto out;
3984 
3985 	status = nfserr_stale_clientid;
3986 	if (is_client_expired(clp))
3987 		/*
3988 		 * The following error isn't really legal.
3989 		 * But we only get here if the client just explicitly
3990 		 * destroyed itself.  Surely it no longer cares what
3991 		 * error it gets back on an operation for the dead
3992 		 * client.
3993 		 */
3994 		goto out;
3995 
3996 	status = nfs_ok;
3997 	trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
3998 	nfsd4_client_record_create(clp);
3999 	inc_reclaim_complete(clp);
4000 out:
4001 	return status;
4002 }
4003 
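/*
 * SETCLIENTID (NFSv4.0): allocate a new client record and add it to the
 * unconfirmed table, discarding any unconfirmed client of the same name.
 * A confirmed client of that name which still holds state must have used
 * SETCLIENTID (not EXCHANGE_ID) and present the same credential, or we
 * return nfserr_clid_inuse.  The reply carries the new clientid and the
 * confirm verifier expected by SETCLIENTID_CONFIRM.
 */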
4004 __be32
4005 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4006 		  union nfsd4_op_u *u)
4007 {
4008 	struct nfsd4_setclientid *setclid = &u->setclientid;
4009 	struct xdr_netobj 	clname = setclid->se_name;
4010 	nfs4_verifier		clverifier = setclid->se_verf;
4011 	struct nfs4_client	*conf, *new;
4012 	struct nfs4_client	*unconf = NULL;
4013 	__be32 			status;
4014 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4015 
4016 	new = create_client(clname, rqstp, &clverifier);
4017 	if (new == NULL)
4018 		return nfserr_jukebox;
4019 	spin_lock(&nn->client_lock);
4020 	conf = find_confirmed_client_by_name(&clname, nn);
4021 	if (conf && client_has_state(conf)) {
4022 		status = nfserr_clid_inuse;
4023 		if (clp_used_exchangeid(conf))
4024 			goto out;
4025 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4026 			trace_nfsd_clid_cred_mismatch(conf, rqstp);
4027 			goto out;
4028 		}
4029 	}
4030 	unconf = find_unconfirmed_client_by_name(&clname, nn);
4031 	if (unconf)
4032 		unhash_client_locked(unconf);
4033 	if (conf) {
4034 		if (same_verf(&conf->cl_verifier, &clverifier)) {
4035 			copy_clid(new, conf);
4036 			gen_confirm(new, nn);
4037 		} else
4038 			trace_nfsd_clid_verf_mismatch(conf, rqstp,
4039 						      &clverifier);
4040 	} else
4041 		trace_nfsd_clid_fresh(new);
4042 	new->cl_minorversion = 0;
4043 	gen_callback(new, setclid, rqstp);
4044 	add_to_unconfirmed(new);
4045 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4046 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4047 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4048 	new = NULL;
4049 	status = nfs_ok;
4050 out:
4051 	spin_unlock(&nn->client_lock);
4052 	if (new)
4053 		free_client(new);
4054 	if (unconf) {
4055 		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4056 		expire_client(unconf);
4057 	}
4058 	return status;
4059 }
4060 
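/*
 * SETCLIENTID_CONFIRM: the credential must match the client record(s)
 * the clientid refers to.  When the confirm verifier matches the
 * unconfirmed record, that record is confirmed, replacing (or updating
 * the callback channel of) any previously confirmed client; a
 * retransmission that only matches the already-confirmed record simply
 * succeeds.
 */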
4061 __be32
4062 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4063 			struct nfsd4_compound_state *cstate,
4064 			union nfsd4_op_u *u)
4065 {
4066 	struct nfsd4_setclientid_confirm *setclientid_confirm =
4067 			&u->setclientid_confirm;
4068 	struct nfs4_client *conf, *unconf;
4069 	struct nfs4_client *old = NULL;
4070 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4071 	clientid_t * clid = &setclientid_confirm->sc_clientid;
4072 	__be32 status;
4073 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4074 
4075 	if (STALE_CLIENTID(clid, nn))
4076 		return nfserr_stale_clientid;
4077 
4078 	spin_lock(&nn->client_lock);
4079 	conf = find_confirmed_client(clid, false, nn);
4080 	unconf = find_unconfirmed_client(clid, false, nn);
4081 	/*
4082 	 * We try hard to give out unique clientid's, so if we get an
4083 	 * We try hard to give out unique clientids, so if we get an
4084 	 * the client may be buggy; this should never happen.
4085 	 *
4086 	 * Nevertheless, RFC 7530 recommends INUSE for this case:
4087 	 */
4088 	status = nfserr_clid_inuse;
4089 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4090 		trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4091 		goto out;
4092 	}
4093 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4094 		trace_nfsd_clid_cred_mismatch(conf, rqstp);
4095 		goto out;
4096 	}
4097 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4098 		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4099 			status = nfs_ok;
4100 		} else
4101 			status = nfserr_stale_clientid;
4102 		goto out;
4103 	}
4104 	status = nfs_ok;
4105 	if (conf) {
4106 		old = unconf;
4107 		unhash_client_locked(old);
4108 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4109 	} else {
4110 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4111 		if (old) {
4112 			status = nfserr_clid_inuse;
4113 			if (client_has_state(old)
4114 					&& !same_creds(&unconf->cl_cred,
4115 							&old->cl_cred))
4116 				goto out;
4117 			status = mark_client_expired_locked(old);
4118 			if (status) {
4119 				old = NULL;
4120 				goto out;
4121 			}
4122 			trace_nfsd_clid_replaced(&old->cl_clientid);
4123 		}
4124 		move_to_confirmed(unconf);
4125 		conf = unconf;
4126 	}
4127 	get_client_locked(conf);
4128 	spin_unlock(&nn->client_lock);
4129 	if (conf == unconf)
4130 		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4131 	nfsd4_probe_callback(conf);
4132 	spin_lock(&nn->client_lock);
4133 	put_client_renew_locked(conf);
4134 out:
4135 	spin_unlock(&nn->client_lock);
4136 	if (old)
4137 		expire_client(old);
4138 	return status;
4139 }
4140 
4141 static struct nfs4_file *nfsd4_alloc_file(void)
4142 {
4143 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
4144 }
4145 
4146 /* OPEN Share state helper functions */
4147 static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
4148 				struct nfs4_file *fp)
4149 {
4150 	lockdep_assert_held(&state_lock);
4151 
4152 	refcount_set(&fp->fi_ref, 1);
4153 	spin_lock_init(&fp->fi_lock);
4154 	INIT_LIST_HEAD(&fp->fi_stateids);
4155 	INIT_LIST_HEAD(&fp->fi_delegations);
4156 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4157 	fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4158 	fp->fi_deleg_file = NULL;
4159 	fp->fi_had_conflict = false;
4160 	fp->fi_share_deny = 0;
4161 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4162 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
4163 	fp->fi_aliased = false;
4164 	fp->fi_inode = d_inode(fh->fh_dentry);
4165 #ifdef CONFIG_NFSD_PNFS
4166 	INIT_LIST_HEAD(&fp->fi_lo_states);
4167 	atomic_set(&fp->fi_lo_recalls, 0);
4168 #endif
4169 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4170 }
4171 
4172 void
4173 nfsd4_free_slabs(void)
4174 {
4175 	kmem_cache_destroy(client_slab);
4176 	kmem_cache_destroy(openowner_slab);
4177 	kmem_cache_destroy(lockowner_slab);
4178 	kmem_cache_destroy(file_slab);
4179 	kmem_cache_destroy(stateid_slab);
4180 	kmem_cache_destroy(deleg_slab);
4181 	kmem_cache_destroy(odstate_slab);
4182 }
4183 
4184 int
4185 nfsd4_init_slabs(void)
4186 {
4187 	client_slab = kmem_cache_create("nfsd4_clients",
4188 			sizeof(struct nfs4_client), 0, 0, NULL);
4189 	if (client_slab == NULL)
4190 		goto out;
4191 	openowner_slab = kmem_cache_create("nfsd4_openowners",
4192 			sizeof(struct nfs4_openowner), 0, 0, NULL);
4193 	if (openowner_slab == NULL)
4194 		goto out_free_client_slab;
4195 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4196 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
4197 	if (lockowner_slab == NULL)
4198 		goto out_free_openowner_slab;
4199 	file_slab = kmem_cache_create("nfsd4_files",
4200 			sizeof(struct nfs4_file), 0, 0, NULL);
4201 	if (file_slab == NULL)
4202 		goto out_free_lockowner_slab;
4203 	stateid_slab = kmem_cache_create("nfsd4_stateids",
4204 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4205 	if (stateid_slab == NULL)
4206 		goto out_free_file_slab;
4207 	deleg_slab = kmem_cache_create("nfsd4_delegations",
4208 			sizeof(struct nfs4_delegation), 0, 0, NULL);
4209 	if (deleg_slab == NULL)
4210 		goto out_free_stateid_slab;
4211 	odstate_slab = kmem_cache_create("nfsd4_odstate",
4212 			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4213 	if (odstate_slab == NULL)
4214 		goto out_free_deleg_slab;
4215 	return 0;
4216 
4217 out_free_deleg_slab:
4218 	kmem_cache_destroy(deleg_slab);
4219 out_free_stateid_slab:
4220 	kmem_cache_destroy(stateid_slab);
4221 out_free_file_slab:
4222 	kmem_cache_destroy(file_slab);
4223 out_free_lockowner_slab:
4224 	kmem_cache_destroy(lockowner_slab);
4225 out_free_openowner_slab:
4226 	kmem_cache_destroy(openowner_slab);
4227 out_free_client_slab:
4228 	kmem_cache_destroy(client_slab);
4229 out:
4230 	return -ENOMEM;
4231 }
4232 
4233 static void init_nfs4_replay(struct nfs4_replay *rp)
4234 {
4235 	rp->rp_status = nfserr_serverfault;
4236 	rp->rp_buflen = 0;
4237 	rp->rp_buf = rp->rp_ibuf;
4238 	mutex_init(&rp->rp_mutex);
4239 }
4240 
4241 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4242 		struct nfs4_stateowner *so)
4243 {
4244 	if (!nfsd4_has_session(cstate)) {
4245 		mutex_lock(&so->so_replay.rp_mutex);
4246 		cstate->replay_owner = nfs4_get_stateowner(so);
4247 	}
4248 }
4249 
4250 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4251 {
4252 	struct nfs4_stateowner *so = cstate->replay_owner;
4253 
4254 	if (so != NULL) {
4255 		cstate->replay_owner = NULL;
4256 		mutex_unlock(&so->so_replay.rp_mutex);
4257 		nfs4_put_stateowner(so);
4258 	}
4259 }
4260 
4261 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4262 {
4263 	struct nfs4_stateowner *sop;
4264 
4265 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
4266 	if (!sop)
4267 		return NULL;
4268 
4269 	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4270 	if (!sop->so_owner.data) {
4271 		kmem_cache_free(slab, sop);
4272 		return NULL;
4273 	}
4274 
4275 	INIT_LIST_HEAD(&sop->so_stateids);
4276 	sop->so_client = clp;
4277 	init_nfs4_replay(&sop->so_replay);
4278 	atomic_set(&sop->so_count, 1);
4279 	return sop;
4280 }
4281 
4282 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4283 {
4284 	lockdep_assert_held(&clp->cl_lock);
4285 
4286 	list_add(&oo->oo_owner.so_strhash,
4287 		 &clp->cl_ownerstr_hashtbl[strhashval]);
4288 	list_add(&oo->oo_perclient, &clp->cl_openowners);
4289 }
4290 
4291 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4292 {
4293 	unhash_openowner_locked(openowner(so));
4294 }
4295 
4296 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4297 {
4298 	struct nfs4_openowner *oo = openowner(so);
4299 
4300 	kmem_cache_free(openowner_slab, oo);
4301 }
4302 
4303 static const struct nfs4_stateowner_operations openowner_ops = {
4304 	.so_unhash =	nfs4_unhash_openowner,
4305 	.so_free =	nfs4_free_openowner,
4306 };
4307 
4308 static struct nfs4_ol_stateid *
4309 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4310 {
4311 	struct nfs4_ol_stateid *local, *ret = NULL;
4312 	struct nfs4_openowner *oo = open->op_openowner;
4313 
4314 	lockdep_assert_held(&fp->fi_lock);
4315 
4316 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4317 		/* ignore lock owners */
4318 		if (local->st_stateowner->so_is_open_owner == 0)
4319 			continue;
4320 		if (local->st_stateowner != &oo->oo_owner)
4321 			continue;
4322 		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4323 			ret = local;
4324 			refcount_inc(&ret->st_stid.sc_count);
4325 			break;
4326 		}
4327 	}
4328 	return ret;
4329 }
4330 
4331 static __be32
4332 nfsd4_verify_open_stid(struct nfs4_stid *s)
4333 {
4334 	__be32 ret = nfs_ok;
4335 
4336 	switch (s->sc_type) {
4337 	default:
4338 		break;
4339 	case 0:
4340 	case NFS4_CLOSED_STID:
4341 	case NFS4_CLOSED_DELEG_STID:
4342 		ret = nfserr_bad_stateid;
4343 		break;
4344 	case NFS4_REVOKED_DELEG_STID:
4345 		ret = nfserr_deleg_revoked;
4346 	}
4347 	return ret;
4348 }
4349 
4350 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4351 static __be32
4352 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4353 {
4354 	__be32 ret;
4355 
4356 	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4357 	ret = nfsd4_verify_open_stid(&stp->st_stid);
4358 	if (ret != nfs_ok)
4359 		mutex_unlock(&stp->st_mutex);
4360 	return ret;
4361 }
4362 
4363 static struct nfs4_ol_stateid *
4364 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4365 {
4366 	struct nfs4_ol_stateid *stp;
4367 	for (;;) {
4368 		spin_lock(&fp->fi_lock);
4369 		stp = nfsd4_find_existing_open(fp, open);
4370 		spin_unlock(&fp->fi_lock);
4371 		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4372 			break;
4373 		nfs4_put_stid(&stp->st_stid);
4374 	}
4375 	return stp;
4376 }
4377 
4378 static struct nfs4_openowner *
4379 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4380 			   struct nfsd4_compound_state *cstate)
4381 {
4382 	struct nfs4_client *clp = cstate->clp;
4383 	struct nfs4_openowner *oo, *ret;
4384 
4385 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4386 	if (!oo)
4387 		return NULL;
4388 	oo->oo_owner.so_ops = &openowner_ops;
4389 	oo->oo_owner.so_is_open_owner = 1;
4390 	oo->oo_owner.so_seqid = open->op_seqid;
4391 	oo->oo_flags = 0;
4392 	if (nfsd4_has_session(cstate))
4393 		oo->oo_flags |= NFS4_OO_CONFIRMED;
4394 	oo->oo_time = 0;
4395 	oo->oo_last_closed_stid = NULL;
4396 	INIT_LIST_HEAD(&oo->oo_close_lru);
4397 	spin_lock(&clp->cl_lock);
4398 	ret = find_openstateowner_str_locked(strhashval, open, clp);
4399 	if (ret == NULL) {
4400 		hash_openowner(oo, clp, strhashval);
4401 		ret = oo;
4402 	} else
4403 		nfs4_free_stateowner(&oo->oo_owner);
4404 
4405 	spin_unlock(&clp->cl_lock);
4406 	return ret;
4407 }
4408 
4409 static struct nfs4_ol_stateid *
4410 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4411 {
4412 
4413 	struct nfs4_openowner *oo = open->op_openowner;
4414 	struct nfs4_ol_stateid *retstp = NULL;
4415 	struct nfs4_ol_stateid *stp;
4416 
4417 	stp = open->op_stp;
4418 	/* Init and lock st_mutex outside the spinlocks to avoid sleeping-in-atomic warnings */
4419 	mutex_init(&stp->st_mutex);
4420 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4421 
4422 retry:
4423 	spin_lock(&oo->oo_owner.so_client->cl_lock);
4424 	spin_lock(&fp->fi_lock);
4425 
4426 	retstp = nfsd4_find_existing_open(fp, open);
4427 	if (retstp)
4428 		goto out_unlock;
4429 
4430 	open->op_stp = NULL;
4431 	refcount_inc(&stp->st_stid.sc_count);
4432 	stp->st_stid.sc_type = NFS4_OPEN_STID;
4433 	INIT_LIST_HEAD(&stp->st_locks);
4434 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4435 	get_nfs4_file(fp);
4436 	stp->st_stid.sc_file = fp;
4437 	stp->st_access_bmap = 0;
4438 	stp->st_deny_bmap = 0;
4439 	stp->st_openstp = NULL;
4440 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4441 	list_add(&stp->st_perfile, &fp->fi_stateids);
4442 
4443 out_unlock:
4444 	spin_unlock(&fp->fi_lock);
4445 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
4446 	if (retstp) {
4447 		/* Handle races with CLOSE */
4448 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4449 			nfs4_put_stid(&retstp->st_stid);
4450 			goto retry;
4451 		}
4452 		/* To keep mutex tracking happy */
4453 		mutex_unlock(&stp->st_mutex);
4454 		stp = retstp;
4455 	}
4456 	return stp;
4457 }
4458 
4459 /*
4460  * In the 4.0 case we need to keep the owners around a little while to handle
4461  * CLOSE replay. We still do need to release any file access that is held by
4462  * them before returning however.
4463  */
4464 static void
4465 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4466 {
4467 	struct nfs4_ol_stateid *last;
4468 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
4469 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4470 						nfsd_net_id);
4471 
4472 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4473 
4474 	/*
4475 	 * We know that we hold one reference via nfsd4_close, and another
4476 	 * "persistent" reference for the client. If the refcount is higher
4477 	 * than 2, then there are still calls in progress that are using this
4478 	 * stateid. We can't put the sc_file reference until they are finished.
4479 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
4480 	 * there should be no danger of the refcount going back up again at
4481 	 * this point.
4482 	 */
4483 	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4484 
4485 	release_all_access(s);
4486 	if (s->st_stid.sc_file) {
4487 		put_nfs4_file(s->st_stid.sc_file);
4488 		s->st_stid.sc_file = NULL;
4489 	}
4490 
4491 	spin_lock(&nn->client_lock);
4492 	last = oo->oo_last_closed_stid;
4493 	oo->oo_last_closed_stid = s;
4494 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4495 	oo->oo_time = ktime_get_boottime_seconds();
4496 	spin_unlock(&nn->client_lock);
4497 	if (last)
4498 		nfs4_put_stid(&last->st_stid);
4499 }
4500 
4501 /* search file_hashtbl[] for file */
4502 static struct nfs4_file *
4503 find_file_locked(struct svc_fh *fh, unsigned int hashval)
4504 {
4505 	struct nfs4_file *fp;
4506 
4507 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4508 				lockdep_is_held(&state_lock)) {
4509 		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4510 			if (refcount_inc_not_zero(&fp->fi_ref))
4511 				return fp;
4512 		}
4513 	}
4514 	return NULL;
4515 }
4516 
4517 static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
4518 				     unsigned int hashval)
4519 {
4520 	struct nfs4_file *fp;
4521 	struct nfs4_file *ret = NULL;
4522 	bool alias_found = false;
4523 
4524 	spin_lock(&state_lock);
4525 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4526 				 lockdep_is_held(&state_lock)) {
4527 		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4528 			if (refcount_inc_not_zero(&fp->fi_ref))
4529 				ret = fp;
4530 		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
4531 			fp->fi_aliased = alias_found = true;
4532 	}
4533 	if (likely(ret == NULL)) {
4534 		nfsd4_init_file(fh, hashval, new);
4535 		new->fi_aliased = alias_found;
4536 		ret = new;
4537 	}
4538 	spin_unlock(&state_lock);
4539 	return ret;
4540 }
4541 
4542 static struct nfs4_file * find_file(struct svc_fh *fh)
4543 {
4544 	struct nfs4_file *fp;
4545 	unsigned int hashval = file_hashval(fh);
4546 
4547 	rcu_read_lock();
4548 	fp = find_file_locked(fh, hashval);
4549 	rcu_read_unlock();
4550 	return fp;
4551 }
4552 
4553 static struct nfs4_file *
4554 find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
4555 {
4556 	struct nfs4_file *fp;
4557 	unsigned int hashval = file_hashval(fh);
4558 
4559 	rcu_read_lock();
4560 	fp = find_file_locked(fh, hashval);
4561 	rcu_read_unlock();
4562 	if (fp)
4563 		return fp;
4564 
4565 	return insert_file(new, fh, hashval);
4566 }
4567 
4568 /*
4569  * Called to check for a conflicting deny mode when a READ arrives with an
4570  * all-zero stateid, or a WRITE with an all-zero or all-ones stateid.
4571  */
4572 static __be32
4573 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4574 {
4575 	struct nfs4_file *fp;
4576 	__be32 ret = nfs_ok;
4577 
4578 	fp = find_file(current_fh);
4579 	if (!fp)
4580 		return ret;
4581 	/* Check for conflicting share reservations */
4582 	spin_lock(&fp->fi_lock);
4583 	if (fp->fi_share_deny & deny_type)
4584 		ret = nfserr_locked;
4585 	spin_unlock(&fp->fi_lock);
4586 	put_nfs4_file(fp);
4587 	return ret;
4588 }
4589 
4590 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4591 {
4592 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4593 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4594 					  nfsd_net_id);
4595 
4596 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4597 
4598 	/*
4599 	 * We can't do this in nfsd_break_deleg_cb because it is
4600 	 * already holding inode->i_lock.
4601 	 *
4602 	 * If the dl_time != 0, then we know that it has already been
4603 	 * queued for a lease break. Don't queue it again.
4604 	 */
4605 	spin_lock(&state_lock);
4606 	if (delegation_hashed(dp) && dp->dl_time == 0) {
4607 		dp->dl_time = ktime_get_boottime_seconds();
4608 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4609 	}
4610 	spin_unlock(&state_lock);
4611 }
4612 
4613 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4614 		struct rpc_task *task)
4615 {
4616 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4617 
4618 	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4619 	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4620 		return 1;
4621 
4622 	switch (task->tk_status) {
4623 	case 0:
4624 		return 1;
4625 	case -NFS4ERR_DELAY:
4626 		rpc_delay(task, 2 * HZ);
4627 		return 0;
4628 	case -EBADHANDLE:
4629 	case -NFS4ERR_BAD_STATEID:
4630 		/*
4631 		 * Race: client probably got cb_recall before open reply
4632 		 * granting delegation.
4633 		 */
4634 		if (dp->dl_retries--) {
4635 			rpc_delay(task, 2 * HZ);
4636 			return 0;
4637 		}
4638 		fallthrough;
4639 	default:
4640 		return 1;
4641 	}
4642 }
4643 
4644 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4645 {
4646 	struct nfs4_delegation *dp = cb_to_delegation(cb);
4647 
4648 	nfs4_put_stid(&dp->dl_stid);
4649 }
4650 
4651 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4652 	.prepare	= nfsd4_cb_recall_prepare,
4653 	.done		= nfsd4_cb_recall_done,
4654 	.release	= nfsd4_cb_recall_release,
4655 };
4656 
4657 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4658 {
4659 	/*
4660 	 * We're assuming the state code never drops its reference
4661 	 * without first removing the lease.  Since we're in this lease
4662 	 * callback (and since the lease code is serialized by the
4663 	 * i_lock) we know the server hasn't removed the lease yet, and
4664 	 * we know it's safe to take a reference.
4665 	 */
4666 	refcount_inc(&dp->dl_stid.sc_count);
4667 	nfsd4_run_cb(&dp->dl_recall);
4668 }
4669 
4670 /* Called from break_lease() with i_lock held. */
4671 static bool
4672 nfsd_break_deleg_cb(struct file_lock *fl)
4673 {
4674 	bool ret = false;
4675 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4676 	struct nfs4_file *fp = dp->dl_stid.sc_file;
4677 
4678 	trace_nfsd_cb_recall(&dp->dl_stid);
4679 
4680 	/*
4681 	 * We don't want the locks code to timeout the lease for us;
4682 	 * we'll remove it ourself if a delegation isn't returned
4683 	 * in time:
4684 	 */
4685 	fl->fl_break_time = 0;
4686 
4687 	spin_lock(&fp->fi_lock);
4688 	fp->fi_had_conflict = true;
4689 	nfsd_break_one_deleg(dp);
4690 	spin_unlock(&fp->fi_lock);
4691 	return ret;
4692 }
4693 
4694 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4695 {
4696 	struct nfs4_delegation *dl = fl->fl_owner;
4697 	struct svc_rqst *rqst;
4698 	struct nfs4_client *clp;
4699 
4700 	if (!i_am_nfsd())
4701 		return false;
4702 	rqst = kthread_data(current);
4703 	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4704 	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4705 		return false;
4706 	clp = *(rqst->rq_lease_breaker);
4707 	return dl->dl_stid.sc_client == clp;
4708 }
4709 
4710 static int
4711 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4712 		     struct list_head *dispose)
4713 {
4714 	if (arg & F_UNLCK)
4715 		return lease_modify(onlist, arg, dispose);
4716 	else
4717 		return -EAGAIN;
4718 }
4719 
4720 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4721 	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4722 	.lm_break = nfsd_break_deleg_cb,
4723 	.lm_change = nfsd_change_deleg_cb,
4724 };
4725 
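/*
 * NFSv4.0 open- and lock-owner seqid check: so_seqid holds the value the
 * server expects next.  A matching seqid is a new request, so_seqid - 1
 * means the previous request is being replayed (nfserr_replay_me), and
 * anything else is nfserr_bad_seqid.  For example, with so_seqid == 5,
 * seqid 5 is processed normally, 4 is answered as a replay, and 7 gets
 * nfserr_bad_seqid.  Sessions (4.1+) make the check unnecessary.
 */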
4726 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4727 {
4728 	if (nfsd4_has_session(cstate))
4729 		return nfs_ok;
4730 	if (seqid == so->so_seqid - 1)
4731 		return nfserr_replay_me;
4732 	if (seqid == so->so_seqid)
4733 		return nfs_ok;
4734 	return nfserr_bad_seqid;
4735 }
4736 
4737 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
4738 						struct nfsd_net *nn)
4739 {
4740 	struct nfs4_client *found;
4741 
4742 	spin_lock(&nn->client_lock);
4743 	found = find_confirmed_client(clid, sessions, nn);
4744 	if (found)
4745 		atomic_inc(&found->cl_rpc_users);
4746 	spin_unlock(&nn->client_lock);
4747 	return found;
4748 }
4749 
4750 static __be32 set_client(clientid_t *clid,
4751 		struct nfsd4_compound_state *cstate,
4752 		struct nfsd_net *nn)
4753 {
4754 	if (cstate->clp) {
4755 		if (!same_clid(&cstate->clp->cl_clientid, clid))
4756 			return nfserr_stale_clientid;
4757 		return nfs_ok;
4758 	}
4759 	if (STALE_CLIENTID(clid, nn))
4760 		return nfserr_stale_clientid;
4761 	/*
4762 	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
4763 	 * set cstate->clp), so sessions = false:
4764 	 */
4765 	cstate->clp = lookup_clientid(clid, false, nn);
4766 	if (!cstate->clp)
4767 		return nfserr_expired;
4768 	return nfs_ok;
4769 }
4770 
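/*
 * First phase of OPEN processing: preallocate the nfs4_file and open
 * stateid we may need later, resolve the clientid, and find or create
 * the openowner.  An unconfirmed owner is replaced outright; a confirmed
 * one gets the usual NFSv4.0 seqid check.  On pNFS-enabled exports
 * (4.1+) open->op_odstate is preallocated as well.
 */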
4771 __be32
4772 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4773 		    struct nfsd4_open *open, struct nfsd_net *nn)
4774 {
4775 	clientid_t *clientid = &open->op_clientid;
4776 	struct nfs4_client *clp = NULL;
4777 	unsigned int strhashval;
4778 	struct nfs4_openowner *oo = NULL;
4779 	__be32 status;
4780 
4781 	/*
4782 	 * In case we need it later, after we've already created the
4783 	 * file and don't want to risk a further failure:
4784 	 */
4785 	open->op_file = nfsd4_alloc_file();
4786 	if (open->op_file == NULL)
4787 		return nfserr_jukebox;
4788 
4789 	status = set_client(clientid, cstate, nn);
4790 	if (status)
4791 		return status;
4792 	clp = cstate->clp;
4793 
4794 	strhashval = ownerstr_hashval(&open->op_owner);
4795 	oo = find_openstateowner_str(strhashval, open, clp);
4796 	open->op_openowner = oo;
4797 	if (!oo)
4798 		goto new_owner;
4800 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4801 		/* Replace unconfirmed owners without checking for replay. */
4802 		release_openowner(oo);
4803 		open->op_openowner = NULL;
4804 		goto new_owner;
4805 	}
4806 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4807 	if (status)
4808 		return status;
4809 	goto alloc_stateid;
4810 new_owner:
4811 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
4812 	if (oo == NULL)
4813 		return nfserr_jukebox;
4814 	open->op_openowner = oo;
4815 alloc_stateid:
4816 	open->op_stp = nfs4_alloc_open_stateid(clp);
4817 	if (!open->op_stp)
4818 		return nfserr_jukebox;
4819 
4820 	if (nfsd4_has_session(cstate) &&
4821 	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4822 		open->op_odstate = alloc_clnt_odstate(clp);
4823 		if (!open->op_odstate)
4824 			return nfserr_jukebox;
4825 	}
4826 
4827 	return nfs_ok;
4828 }
4829 
4830 static inline __be32
4831 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4832 {
4833 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4834 		return nfserr_openmode;
4835 	else
4836 		return nfs_ok;
4837 }
4838 
4839 static int share_access_to_flags(u32 share_access)
4840 {
4841 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4842 }
4843 
4844 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4845 {
4846 	struct nfs4_stid *ret;
4847 
4848 	ret = find_stateid_by_type(cl, s,
4849 				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4850 	if (!ret)
4851 		return NULL;
4852 	return delegstateid(ret);
4853 }
4854 
4855 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4856 {
4857 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4858 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4859 }
4860 
4861 static __be32
4862 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4863 		struct nfs4_delegation **dp)
4864 {
4865 	int flags;
4866 	__be32 status = nfserr_bad_stateid;
4867 	struct nfs4_delegation *deleg;
4868 
4869 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4870 	if (deleg == NULL)
4871 		goto out;
4872 	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4873 		nfs4_put_stid(&deleg->dl_stid);
4874 		if (cl->cl_minorversion)
4875 			status = nfserr_deleg_revoked;
4876 		goto out;
4877 	}
4878 	flags = share_access_to_flags(open->op_share_access);
4879 	status = nfs4_check_delegmode(deleg, flags);
4880 	if (status) {
4881 		nfs4_put_stid(&deleg->dl_stid);
4882 		goto out;
4883 	}
4884 	*dp = deleg;
4885 out:
4886 	if (!nfsd4_is_deleg_cur(open))
4887 		return nfs_ok;
4888 	if (status)
4889 		return status;
4890 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4891 	return nfs_ok;
4892 }
4893 
4894 static inline int nfs4_access_to_access(u32 nfs4_access)
4895 {
4896 	int flags = 0;
4897 
4898 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4899 		flags |= NFSD_MAY_READ;
4900 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4901 		flags |= NFSD_MAY_WRITE;
4902 	return flags;
4903 }
4904 
4905 static inline __be32
4906 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4907 		struct nfsd4_open *open)
4908 {
4909 	struct iattr iattr = {
4910 		.ia_valid = ATTR_SIZE,
4911 		.ia_size = 0,
4912 	};
4913 	if (!open->op_truncate)
4914 		return 0;
4915 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4916 		return nfserr_inval;
4917 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
4918 }
4919 
4920 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4921 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4922 		struct nfsd4_open *open)
4923 {
4924 	struct nfsd_file *nf = NULL;
4925 	__be32 status;
4926 	int oflag = nfs4_access_to_omode(open->op_share_access);
4927 	int access = nfs4_access_to_access(open->op_share_access);
4928 	unsigned char old_access_bmap, old_deny_bmap;
4929 
4930 	spin_lock(&fp->fi_lock);
4931 
4932 	/*
4933 	 * Are we trying to set a deny mode that would conflict with
4934 	 * current access?
4935 	 */
4936 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4937 	if (status != nfs_ok) {
4938 		spin_unlock(&fp->fi_lock);
4939 		goto out;
4940 	}
4941 
4942 	/* set access to the file */
4943 	status = nfs4_file_get_access(fp, open->op_share_access);
4944 	if (status != nfs_ok) {
4945 		spin_unlock(&fp->fi_lock);
4946 		goto out;
4947 	}
4948 
4949 	/* Set access bits in stateid */
4950 	old_access_bmap = stp->st_access_bmap;
4951 	set_access(open->op_share_access, stp);
4952 
4953 	/* Set new deny mask */
4954 	old_deny_bmap = stp->st_deny_bmap;
4955 	set_deny(open->op_share_deny, stp);
4956 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4957 
4958 	if (!fp->fi_fds[oflag]) {
4959 		spin_unlock(&fp->fi_lock);
4960 		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4961 		if (status)
4962 			goto out_put_access;
4963 		spin_lock(&fp->fi_lock);
4964 		if (!fp->fi_fds[oflag]) {
4965 			fp->fi_fds[oflag] = nf;
4966 			nf = NULL;
4967 		}
4968 	}
4969 	spin_unlock(&fp->fi_lock);
4970 	if (nf)
4971 		nfsd_file_put(nf);
4972 
4973 	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
4974 								access));
4975 	if (status)
4976 		goto out_put_access;
4977 
4978 	status = nfsd4_truncate(rqstp, cur_fh, open);
4979 	if (status)
4980 		goto out_put_access;
4981 out:
4982 	return status;
4983 out_put_access:
4984 	stp->st_access_bmap = old_access_bmap;
4985 	nfs4_file_put_access(fp, open->op_share_access);
4986 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4987 	goto out;
4988 }
4989 
4990 static __be32
4991 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4992 {
4993 	__be32 status;
4994 	unsigned char old_deny_bmap = stp->st_deny_bmap;
4995 
4996 	if (!test_access(open->op_share_access, stp))
4997 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4998 
4999 	/* test and set deny mode */
5000 	spin_lock(&fp->fi_lock);
5001 	status = nfs4_file_check_deny(fp, open->op_share_deny);
5002 	if (status == nfs_ok) {
5003 		set_deny(open->op_share_deny, stp);
5004 		fp->fi_share_deny |=
5005 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5006 	}
5007 	spin_unlock(&fp->fi_lock);
5008 
5009 	if (status != nfs_ok)
5010 		return status;
5011 
5012 	status = nfsd4_truncate(rqstp, cur_fh, open);
5013 	if (status != nfs_ok)
5014 		reset_union_bmap_deny(old_deny_bmap, stp);
5015 	return status;
5016 }
5017 
5018 /* Should we give out recallable state? */
5019 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5020 {
5021 	if (clp->cl_cb_state == NFSD4_CB_UP)
5022 		return true;
5023 	/*
5024 	 * In the sessions case, since we don't have to establish a
5025 	 * separate connection for callbacks, we assume it's OK
5026 	 * until we hear otherwise:
5027 	 */
5028 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5029 }
5030 
5031 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5032 						int flag)
5033 {
5034 	struct file_lock *fl;
5035 
5036 	fl = locks_alloc_lock();
5037 	if (!fl)
5038 		return NULL;
5039 	fl->fl_lmops = &nfsd_lease_mng_ops;
5040 	fl->fl_flags = FL_DELEG;
5041 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5042 	fl->fl_end = OFFSET_MAX;
5043 	fl->fl_owner = (fl_owner_t)dp;
5044 	fl->fl_pid = current->tgid;
5045 	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5046 	return fl;
5047 }
5048 
5049 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5050 					 struct nfs4_file *fp)
5051 {
5052 	struct nfs4_ol_stateid *st;
5053 	struct file *f = fp->fi_deleg_file->nf_file;
5054 	struct inode *ino = locks_inode(f);
5055 	int writes;
5056 
5057 	writes = atomic_read(&ino->i_writecount);
5058 	if (!writes)
5059 		return 0;
5060 	/*
5061 	 * There could be multiple filehandles (hence multiple
5062 	 * nfs4_files) referencing this file, but that's not too
5063 	 * common; let's just give up in that case rather than
5064 	 * trying to go look up all the clients using that other
5065 	 * nfs4_file as well:
5066 	 */
5067 	if (fp->fi_aliased)
5068 		return -EAGAIN;
5069 	/*
5070 	 * If there's a close in progress, make sure that we see it
5071 	 * clear any fi_fds[] entries before we see it decrement
5072 	 * i_writecount:
5073 	 */
5074 	smp_mb__after_atomic();
5075 
5076 	if (fp->fi_fds[O_WRONLY])
5077 		writes--;
5078 	if (fp->fi_fds[O_RDWR])
5079 		writes--;
5080 	if (writes > 0)
5081 		return -EAGAIN; /* There may be non-NFSv4 writers */
5082 	/*
5083 	 * It's possible there are non-NFSv4 write opens in progress,
5084 	 * but if they haven't incremented i_writecount yet then they
5085 	 * also haven't called break_lease() yet; so they'll break this
5086 	 * lease soon enough.  All that's left to check for is NFSv4
5087 	 * opens:
5088 	 */
5089 	spin_lock(&fp->fi_lock);
5090 	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5091 		if (st->st_openstp == NULL /* it's an open */ &&
5092 		    access_permit_write(st) &&
5093 		    st->st_stid.sc_client != clp) {
5094 			spin_unlock(&fp->fi_lock);
5095 			return -EAGAIN;
5096 		}
5097 	}
5098 	spin_unlock(&fp->fi_lock);
5099 	/*
5100 	 * There's a small chance that we could be racing with another
5101 	 * NFSv4 open.  However, any open that hasn't added itself to
5102 	 * the fi_stateids list also hasn't called break_lease yet; so,
5103 	 * they'll break this lease soon enough.
5104 	 */
5105 	return 0;
5106 }
5107 
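/*
 * Try to set up a read delegation: take a reference to a readable
 * nfsd_file for the inode, install an FL_DELEG lease via vfs_setlease(),
 * then re-check for conflicting opens and delegation conflicts before
 * hashing the delegation.  Returns ERR_PTR(-EAGAIN) when a conflict,
 * existing or newly arrived, makes a delegation inappropriate.
 */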
5108 static struct nfs4_delegation *
5109 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
5110 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
5111 {
5112 	int status = 0;
5113 	struct nfs4_delegation *dp;
5114 	struct nfsd_file *nf;
5115 	struct file_lock *fl;
5116 
5117 	/*
5118 	 * The fi_had_conflict and nfs_get_existing_delegation checks
5119 	 * here are just optimizations; we'll need to recheck them at
5120 	 * the end:
5121 	 */
5122 	if (fp->fi_had_conflict)
5123 		return ERR_PTR(-EAGAIN);
5124 
5125 	nf = find_readable_file(fp);
5126 	if (!nf) {
5127 		/*
5128 		 * We probably could attempt another open and get a read
5129 		 * delegation, but for now, don't bother until the
5130 		 * client actually sends us one.
5131 		 */
5132 		return ERR_PTR(-EAGAIN);
5133 	}
5134 	spin_lock(&state_lock);
5135 	spin_lock(&fp->fi_lock);
5136 	if (nfs4_delegation_exists(clp, fp))
5137 		status = -EAGAIN;
5138 	else if (!fp->fi_deleg_file) {
5139 		fp->fi_deleg_file = nf;
5140 		/* increment early to prevent fi_deleg_file from being
5141 		 * cleared */
5142 		fp->fi_delegees = 1;
5143 		nf = NULL;
5144 	} else
5145 		fp->fi_delegees++;
5146 	spin_unlock(&fp->fi_lock);
5147 	spin_unlock(&state_lock);
5148 	if (nf)
5149 		nfsd_file_put(nf);
5150 	if (status)
5151 		return ERR_PTR(status);
5152 
5153 	status = -ENOMEM;
5154 	dp = alloc_init_deleg(clp, fp, fh, odstate);
5155 	if (!dp)
5156 		goto out_delegees;
5157 
5158 	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5159 	if (!fl)
5160 		goto out_clnt_odstate;
5161 
5162 	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5163 	if (fl)
5164 		locks_free_lock(fl);
5165 	if (status)
5166 		goto out_clnt_odstate;
5167 	status = nfsd4_check_conflicting_opens(clp, fp);
5168 	if (status)
5169 		goto out_unlock;
5170 
5171 	spin_lock(&state_lock);
5172 	spin_lock(&fp->fi_lock);
5173 	if (fp->fi_had_conflict)
5174 		status = -EAGAIN;
5175 	else
5176 		status = hash_delegation_locked(dp, fp);
5177 	spin_unlock(&fp->fi_lock);
5178 	spin_unlock(&state_lock);
5179 
5180 	if (status)
5181 		goto out_unlock;
5182 
5183 	return dp;
5184 out_unlock:
5185 	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5186 out_clnt_odstate:
5187 	put_clnt_odstate(dp->dl_clnt_odstate);
5188 	nfs4_put_stid(&dp->dl_stid);
5189 out_delegees:
5190 	put_deleg_file(fp);
5191 	return ERR_PTR(status);
5192 }
5193 
5194 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5195 {
5196 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5197 	if (status == -EAGAIN)
5198 		open->op_why_no_deleg = WND4_CONTENTION;
5199 	else {
5200 		open->op_why_no_deleg = WND4_RESOURCE;
5201 		switch (open->op_deleg_want) {
5202 		case NFS4_SHARE_WANT_READ_DELEG:
5203 		case NFS4_SHARE_WANT_WRITE_DELEG:
5204 		case NFS4_SHARE_WANT_ANY_DELEG:
5205 			break;
5206 		case NFS4_SHARE_WANT_CANCEL:
5207 			open->op_why_no_deleg = WND4_CANCELLED;
5208 			break;
5209 		case NFS4_SHARE_WANT_NO_DELEG:
5210 			WARN_ON_ONCE(1);
5211 		}
5212 	}
5213 }
5214 
5215 /*
5216  * Attempt to hand out a delegation.
5217  *
5218  * Note we don't support write delegations, and won't until the vfs has
5219  * proper support for them.
5220  */
5221 static void
5222 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
5223 			struct nfs4_ol_stateid *stp)
5224 {
5225 	struct nfs4_delegation *dp;
5226 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5227 	struct nfs4_client *clp = stp->st_stid.sc_client;
5228 	int cb_up;
5229 	int status = 0;
5230 
5231 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5232 	open->op_recall = 0;
5233 	switch (open->op_claim_type) {
5234 		case NFS4_OPEN_CLAIM_PREVIOUS:
5235 			if (!cb_up)
5236 				open->op_recall = 1;
5237 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5238 				goto out_no_deleg;
5239 			break;
5240 		case NFS4_OPEN_CLAIM_NULL:
5241 		case NFS4_OPEN_CLAIM_FH:
5242 			/*
5243 			 * Let's not give out any delegations till everyone's
5244 			 * had the chance to reclaim theirs, *and* until
5245 			 * NLM locks have all been reclaimed:
5246 			 */
5247 			if (locks_in_grace(clp->net))
5248 				goto out_no_deleg;
5249 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5250 				goto out_no_deleg;
5251 			break;
5252 		default:
5253 			goto out_no_deleg;
5254 	}
5255 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5256 	if (IS_ERR(dp))
5257 		goto out_no_deleg;
5258 
5259 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5260 
5261 	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5262 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5263 	nfs4_put_stid(&dp->dl_stid);
5264 	return;
5265 out_no_deleg:
5266 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5267 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5268 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5269 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5270 		open->op_recall = 1;
5271 	}
5272 
5273 	/* 4.1 client asking for a delegation? */
5274 	if (open->op_deleg_want)
5275 		nfsd4_open_deleg_none_ext(open, status);
5276 	return;
5277 }
5278 
5279 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5280 					struct nfs4_delegation *dp)
5281 {
5282 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5283 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5284 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5285 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5286 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5287 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5288 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5289 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5290 	}
5291 	/* Otherwise the client must be confused wanting a delegation
5292 	 * it already has, therefore we don't return
5293 	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5294 	 */
5295 }
5296 
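/*
 * Second phase of OPEN processing, called after the file itself has been
 * looked up or created: find or create the nfs4_file and open stateid,
 * apply the share-reservation and access checks (upgrading an existing
 * stateid in place), and finally consider handing out a read delegation.
 * The OPEN succeeds even when no delegation can be given out.
 */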
5297 __be32
5298 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5299 {
5300 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
5301 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5302 	struct nfs4_file *fp = NULL;
5303 	struct nfs4_ol_stateid *stp = NULL;
5304 	struct nfs4_delegation *dp = NULL;
5305 	__be32 status;
5306 	bool new_stp = false;
5307 
5308 	/*
5309 	 * Lookup file; if found, lookup stateid and check open request,
5310 	 * and check for delegations in the process of being recalled.
5311 	 * If not found, create the nfs4_file struct
5312 	 */
5313 	fp = find_or_add_file(open->op_file, current_fh);
5314 	if (fp != open->op_file) {
5315 		status = nfs4_check_deleg(cl, open, &dp);
5316 		if (status)
5317 			goto out;
5318 		stp = nfsd4_find_and_lock_existing_open(fp, open);
5319 	} else {
5320 		open->op_file = NULL;
5321 		status = nfserr_bad_stateid;
5322 		if (nfsd4_is_deleg_cur(open))
5323 			goto out;
5324 	}
5325 
5326 	if (!stp) {
5327 		stp = init_open_stateid(fp, open);
5328 		if (!open->op_stp)
5329 			new_stp = true;
5330 	}
5331 
5332 	/*
5333 	 * OPEN the file, or upgrade an existing OPEN.
5334 	 * If truncate fails, the OPEN fails.
5335 	 *
5336 	 * stp is already locked.
5337 	 */
5338 	if (!new_stp) {
5339 		/* Stateid was found, this is an OPEN upgrade */
5340 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5341 		if (status) {
5342 			mutex_unlock(&stp->st_mutex);
5343 			goto out;
5344 		}
5345 	} else {
5346 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5347 		if (status) {
5348 			stp->st_stid.sc_type = NFS4_CLOSED_STID;
5349 			release_open_stateid(stp);
5350 			mutex_unlock(&stp->st_mutex);
5351 			goto out;
5352 		}
5353 
5354 		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5355 							open->op_odstate);
5356 		if (stp->st_clnt_odstate == open->op_odstate)
5357 			open->op_odstate = NULL;
5358 	}
5359 
5360 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5361 	mutex_unlock(&stp->st_mutex);
5362 
5363 	if (nfsd4_has_session(&resp->cstate)) {
5364 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5365 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5366 			open->op_why_no_deleg = WND4_NOT_WANTED;
5367 			goto nodeleg;
5368 		}
5369 	}
5370 
5371 	/*
5372 	 * Attempt to hand out a delegation. No error return, because the
5373 	 * OPEN succeeds even if we fail.
5374 	 */
5375 	nfs4_open_delegation(current_fh, open, stp);
5376 nodeleg:
5377 	status = nfs_ok;
5378 	trace_nfsd_open(&stp->st_stid.sc_stateid);
5379 out:
5380 	/* 4.1 client trying to upgrade/downgrade delegation? */
5381 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5382 	    open->op_deleg_want)
5383 		nfsd4_deleg_xgrade_none_ext(open, dp);
5384 
5385 	if (fp)
5386 		put_nfs4_file(fp);
5387 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5388 		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5389 	/*
5390 	 * To finish the open response, we just need to set the rflags.
5391 	 */
5392 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5393 	if (nfsd4_has_session(&resp->cstate))
5394 		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5395 	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5396 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5397 
5398 	if (dp)
5399 		nfs4_put_stid(&dp->dl_stid);
5400 	if (stp)
5401 		nfs4_put_stid(&stp->st_stid);
5402 
5403 	return status;
5404 }
5405 
5406 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5407 			      struct nfsd4_open *open)
5408 {
5409 	if (open->op_openowner) {
5410 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5411 
5412 		nfsd4_cstate_assign_replay(cstate, so);
5413 		nfs4_put_stateowner(so);
5414 	}
5415 	if (open->op_file)
5416 		kmem_cache_free(file_slab, open->op_file);
5417 	if (open->op_stp)
5418 		nfs4_put_stid(&open->op_stp->st_stid);
5419 	if (open->op_odstate)
5420 		kmem_cache_free(odstate_slab, open->op_odstate);
5421 }
5422 
5423 __be32
5424 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5425 	    union nfsd4_op_u *u)
5426 {
5427 	clientid_t *clid = &u->renew;
5428 	struct nfs4_client *clp;
5429 	__be32 status;
5430 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5431 
5432 	trace_nfsd_clid_renew(clid);
5433 	status = set_client(clid, cstate, nn);
5434 	if (status)
5435 		return status;
5436 	clp = cstate->clp;
5437 	if (!list_empty(&clp->cl_delegations)
5438 			&& clp->cl_cb_state != NFSD4_CB_UP)
5439 		return nfserr_cb_path_down;
5440 	return nfs_ok;
5441 }
5442 
5443 void
5444 nfsd4_end_grace(struct nfsd_net *nn)
5445 {
5446 	/* do nothing if grace period already ended */
5447 	if (nn->grace_ended)
5448 		return;
5449 
5450 	trace_nfsd_grace_complete(nn);
5451 	nn->grace_ended = true;
5452 	/*
5453 	 * If the server goes down again right now, an NFSv4
5454 	 * client will still be allowed to reclaim after it comes back up,
5455 	 * even if it hasn't yet had a chance to reclaim state this time.
5456 	 *
5457 	 */
5458 	nfsd4_record_grace_done(nn);
5459 	/*
5460 	 * At this point, NFSv4 clients can still reclaim.  But if the
5461 	 * server crashes, any that have not yet reclaimed will be out
5462 	 * of luck on the next boot.
5463 	 *
5464 	 * (NFSv4.1+ clients are considered to have reclaimed once they
5465 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
5466 	 * have reclaimed after their first OPEN.)
5467 	 */
5468 	locks_end_grace(&nn->nfsd4_manager);
5469 	/*
5470 	 * At this point, and once lockd and/or any other containers
5471 	 * exit their grace period, further reclaims will fail and
5472 	 * regular locking can resume.
5473 	 */
5474 }
5475 
5476 /*
5477  * If we've waited a lease period but there are still clients trying to
5478  * reclaim, wait a little longer to give them a chance to finish.
5479  */
5480 static bool clients_still_reclaiming(struct nfsd_net *nn)
5481 {
5482 	time64_t double_grace_period_end = nn->boot_time +
5483 					   2 * nn->nfsd4_lease;
5484 
5485 	if (nn->track_reclaim_completes &&
5486 			atomic_read(&nn->nr_reclaim_complete) ==
5487 			nn->reclaim_str_hashtbl_size)
5488 		return false;
5489 	if (!nn->somebody_reclaimed)
5490 		return false;
5491 	nn->somebody_reclaimed = false;
5492 	/*
5493 	 * If we've given them *two* lease times to reclaim, and they're
5494 	 * still not done, give up:
5495 	 */
5496 	if (ktime_get_boottime_seconds() > double_grace_period_end)
5497 		return false;
5498 	return true;
5499 }
5500 
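/*
 * Bookkeeping for one laundromat pass: "cutoff" is the oldest
 * last-refresh time still considered live, and "new_timeo" tracks the
 * shortest time (in seconds) until some still-live piece of state will
 * expire.  As an illustration, with a 90s lease a client last renewed
 * 30 seconds ago has last_refresh - cutoff == 60, so new_timeo is
 * clamped to 60.
 */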
5501 struct laundry_time {
5502 	time64_t cutoff;
5503 	time64_t new_timeo;
5504 };
5505 
5506 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5507 {
5508 	time64_t time_remaining;
5509 
5510 	if (last_refresh < lt->cutoff)
5511 		return true;
5512 	time_remaining = last_refresh - lt->cutoff;
5513 	lt->new_timeo = min(lt->new_timeo, time_remaining);
5514 	return false;
5515 }
5516 
5517 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5518 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5519 {
5520 	spin_lock_init(&nn->nfsd_ssc_lock);
5521 	INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5522 	init_waitqueue_head(&nn->nfsd_ssc_waitq);
5523 }
5524 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5525 
5526 /*
5527  * This is called when nfsd is being shut down, after all inter_ssc
5528  * cleanup is done, to destroy the ssc delayed unmount list.
5529  */
5530 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5531 {
5532 	struct nfsd4_ssc_umount_item *ni = NULL;
5533 	struct nfsd4_ssc_umount_item *tmp;
5534 
5535 	spin_lock(&nn->nfsd_ssc_lock);
5536 	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5537 		list_del(&ni->nsui_list);
5538 		spin_unlock(&nn->nfsd_ssc_lock);
5539 		mntput(ni->nsui_vfsmount);
5540 		kfree(ni);
5541 		spin_lock(&nn->nfsd_ssc_lock);
5542 	}
5543 	spin_unlock(&nn->nfsd_ssc_lock);
5544 }
5545 
5546 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5547 {
5548 	bool do_wakeup = false;
5549 	struct nfsd4_ssc_umount_item *ni = NULL;
5550 	struct nfsd4_ssc_umount_item *tmp;
5551 
5552 	spin_lock(&nn->nfsd_ssc_lock);
5553 	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5554 		if (time_after(jiffies, ni->nsui_expire)) {
5555 			if (refcount_read(&ni->nsui_refcnt) > 1)
5556 				continue;
5557 
5558 			/* mark as being unmounted */
5559 			ni->nsui_busy = true;
5560 			spin_unlock(&nn->nfsd_ssc_lock);
5561 			mntput(ni->nsui_vfsmount);
5562 			spin_lock(&nn->nfsd_ssc_lock);
5563 
5564 			/* waiters need to start from the beginning of the list */
5565 			list_del(&ni->nsui_list);
5566 			kfree(ni);
5567 
5568 			/* wakeup ssc_connect waiters */
5569 			do_wakeup = true;
5570 			continue;
5571 		}
5572 		break;
5573 	}
5574 	if (do_wakeup)
5575 		wake_up_all(&nn->nfsd_ssc_waitq);
5576 	spin_unlock(&nn->nfsd_ssc_lock);
5577 }
5578 #endif /* CONFIG_NFSD_V4_2_INTER_SSC */
5579 
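/*
 * The laundromat: runs from a work queue to expire any state whose lease has
 * run out.  Ends the grace period once nobody is still reclaiming, then reaps
 * expired copy-notify stateids, clients, recalled delegations, v4.0 open
 * stateids parked on the close LRU, and timed-out blocked locks.  Returns the
 * number of seconds until it should run again.
 */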
5580 static time64_t
5581 nfs4_laundromat(struct nfsd_net *nn)
5582 {
5583 	struct nfs4_client *clp;
5584 	struct nfs4_openowner *oo;
5585 	struct nfs4_delegation *dp;
5586 	struct nfs4_ol_stateid *stp;
5587 	struct nfsd4_blocked_lock *nbl;
5588 	struct list_head *pos, *next, reaplist;
5589 	struct laundry_time lt = {
5590 		.cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
5591 		.new_timeo = nn->nfsd4_lease
5592 	};
5593 	struct nfs4_cpntf_state *cps;
5594 	copy_stateid_t *cps_t;
5595 	int i;
5596 
5597 	if (clients_still_reclaiming(nn)) {
5598 		lt.new_timeo = 0;
5599 		goto out;
5600 	}
5601 	nfsd4_end_grace(nn);
5602 	INIT_LIST_HEAD(&reaplist);
5603 
5604 	spin_lock(&nn->s2s_cp_lock);
5605 	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5606 		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5607 		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5608 				state_expired(&lt, cps->cpntf_time))
5609 			_free_cpntf_state_locked(nn, cps);
5610 	}
5611 	spin_unlock(&nn->s2s_cp_lock);
5612 
5613 	spin_lock(&nn->client_lock);
5614 	list_for_each_safe(pos, next, &nn->client_lru) {
5615 		clp = list_entry(pos, struct nfs4_client, cl_lru);
5616 		if (!state_expired(&lt, clp->cl_time))
5617 			break;
5618 		if (mark_client_expired_locked(clp))
5619 			continue;
5620 		list_add(&clp->cl_lru, &reaplist);
5621 	}
5622 	spin_unlock(&nn->client_lock);
5623 	list_for_each_safe(pos, next, &reaplist) {
5624 		clp = list_entry(pos, struct nfs4_client, cl_lru);
5625 		trace_nfsd_clid_purged(&clp->cl_clientid);
5626 		list_del_init(&clp->cl_lru);
5627 		expire_client(clp);
5628 	}
5629 	spin_lock(&state_lock);
5630 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
5631 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
5632 		if (!state_expired(&lt, dp->dl_time))
5633 			break;
5634 		WARN_ON(!unhash_delegation_locked(dp));
5635 		list_add(&dp->dl_recall_lru, &reaplist);
5636 	}
5637 	spin_unlock(&state_lock);
5638 	while (!list_empty(&reaplist)) {
5639 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
5640 					dl_recall_lru);
5641 		list_del_init(&dp->dl_recall_lru);
5642 		revoke_delegation(dp);
5643 	}
5644 
5645 	spin_lock(&nn->client_lock);
5646 	while (!list_empty(&nn->close_lru)) {
5647 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5648 					oo_close_lru);
5649 		if (!state_expired(&lt, oo->oo_time))
5650 			break;
5651 		list_del_init(&oo->oo_close_lru);
5652 		stp = oo->oo_last_closed_stid;
5653 		oo->oo_last_closed_stid = NULL;
5654 		spin_unlock(&nn->client_lock);
5655 		nfs4_put_stid(&stp->st_stid);
5656 		spin_lock(&nn->client_lock);
5657 	}
5658 	spin_unlock(&nn->client_lock);
5659 
5660 	/*
5661 	 * It's possible for a client to try to acquire an already held lock
5662 	 * that is being held for a long time, and then lose interest in it.
5663 	 * So, we clean out any request that has not been revisited within a
5664 	 * lease period, on the assumption that the client is no longer interested.
5665 	 *
5666 	 * RFC5661, sec. 9.6 states that the client must not rely on getting
5667 	 * notifications and must continue to poll for locks, even when the
5668 	 * server supports them. Thus this shouldn't lead to clients blocking
5669 	 * indefinitely once the lock does become free.
5670 	 */
5671 	BUG_ON(!list_empty(&reaplist));
5672 	spin_lock(&nn->blocked_locks_lock);
5673 	while (!list_empty(&nn->blocked_locks_lru)) {
5674 		nbl = list_first_entry(&nn->blocked_locks_lru,
5675 					struct nfsd4_blocked_lock, nbl_lru);
5676 		if (!state_expired(&lt, nbl->nbl_time))
5677 			break;
5678 		list_move(&nbl->nbl_lru, &reaplist);
5679 		list_del_init(&nbl->nbl_list);
5680 	}
5681 	spin_unlock(&nn->blocked_locks_lock);
5682 
5683 	while (!list_empty(&reaplist)) {
5684 		nbl = list_first_entry(&reaplist,
5685 					struct nfsd4_blocked_lock, nbl_lru);
5686 		list_del_init(&nbl->nbl_lru);
5687 		free_blocked_lock(nbl);
5688 	}
5689 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5690 	/* service the server-to-server copy delayed unmount list */
5691 	nfsd4_ssc_expire_umount(nn);
5692 #endif
5693 out:
5694 	return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5695 }
5696 
5697 static struct workqueue_struct *laundry_wq;
5698 static void laundromat_main(struct work_struct *);
5699 
5700 static void
5701 laundromat_main(struct work_struct *laundry)
5702 {
5703 	time64_t t;
5704 	struct delayed_work *dwork = to_delayed_work(laundry);
5705 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5706 					   laundromat_work);
5707 
5708 	t = nfs4_laundromat(nn);
5709 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
5710 }
5711 
5712 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
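/* Fail with nfserr_bad_stateid if the stateid was not issued for the file @fhp names. */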
5713 {
5714 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5715 		return nfserr_bad_stateid;
5716 	return nfs_ok;
5717 }
5718 
5719 static
5720 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5721 {
5722 	__be32 status = nfserr_openmode;
5723 
5724 	/* For lock stateids, we test the parent open, not the lock: */
5725 	if (stp->st_openstp)
5726 		stp = stp->st_openstp;
5727 	if ((flags & WR_STATE) && !access_permit_write(stp))
5728 		goto out;
5729 	if ((flags & RD_STATE) && !access_permit_read(stp))
5730 		goto out;
5731 	status = nfs_ok;
5732 out:
5733 	return status;
5734 }
5735 
5736 static inline __be32
5737 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5738 {
5739 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
5740 		return nfs_ok;
5741 	else if (opens_in_grace(net)) {
5742 		/* The answer in the remaining cases depends on the existence of
5743 		 * conflicting state, so we must wait out the grace period. */
5744 		return nfserr_grace;
5745 	} else if (flags & WR_STATE)
5746 		return nfs4_share_conflict(current_fh,
5747 				NFS4_SHARE_DENY_WRITE);
5748 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5749 		return nfs4_share_conflict(current_fh,
5750 				NFS4_SHARE_DENY_READ);
5751 }
5752 
5753 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5754 {
5755 	/*
5756 	 * When sessions are used the stateid generation number is ignored
5757 	 * when it is zero.
5758 	 */
5759 	if (has_session && in->si_generation == 0)
5760 		return nfs_ok;
5761 
5762 	if (in->si_generation == ref->si_generation)
5763 		return nfs_ok;
5764 
5765 	/* If the client sends us a stateid from the future, it's buggy: */
5766 	if (nfsd4_stateid_generation_after(in, ref))
5767 		return nfserr_bad_stateid;
5768 	/*
5769 	 * However, we could see a stateid from the past, even from a
5770 	 * non-buggy client.  For example, if the client sends a lock
5771 	 * while some IO is outstanding, the lock may bump si_generation
5772 	 * while the IO is still in flight.  The client could avoid that
5773 	 * situation by waiting for responses on all the IO requests,
5774 	 * but better performance may result in retrying IO that
5775 	 * receives an old_stateid error if requests are rarely
5776 	 * reordered in flight:
5777 	 */
5778 	return nfserr_old_stateid;
5779 }
5780 
5781 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5782 {
5783 	__be32 ret;
5784 
5785 	spin_lock(&s->sc_lock);
5786 	ret = nfsd4_verify_open_stid(s);
5787 	if (ret == nfs_ok)
5788 		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5789 	spin_unlock(&s->sc_lock);
5790 	return ret;
5791 }
5792 
5793 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5794 {
5795 	if (ols->st_stateowner->so_is_open_owner &&
5796 	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5797 		return nfserr_bad_stateid;
5798 	return nfs_ok;
5799 }
5800 
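/*
 * Validate a stateid on behalf of TEST_STATEID: look it up under the client's
 * lock and map its type to the status the client should see for it.
 */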
5801 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5802 {
5803 	struct nfs4_stid *s;
5804 	__be32 status = nfserr_bad_stateid;
5805 
5806 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5807 		CLOSE_STATEID(stateid))
5808 		return status;
5809 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
5810 		return status;
5811 	spin_lock(&cl->cl_lock);
5812 	s = find_stateid_locked(cl, stateid);
5813 	if (!s)
5814 		goto out_unlock;
5815 	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5816 	if (status)
5817 		goto out_unlock;
5818 	switch (s->sc_type) {
5819 	case NFS4_DELEG_STID:
5820 		status = nfs_ok;
5821 		break;
5822 	case NFS4_REVOKED_DELEG_STID:
5823 		status = nfserr_deleg_revoked;
5824 		break;
5825 	case NFS4_OPEN_STID:
5826 	case NFS4_LOCK_STID:
5827 		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5828 		break;
5829 	default:
5830 		printk("unknown stateid type %x\n", s->sc_type);
5831 		fallthrough;
5832 	case NFS4_CLOSED_STID:
5833 	case NFS4_CLOSED_DELEG_STID:
5834 		status = nfserr_bad_stateid;
5835 	}
5836 out_unlock:
5837 	spin_unlock(&cl->cl_lock);
5838 	return status;
5839 }
5840 
5841 __be32
5842 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5843 		     stateid_t *stateid, unsigned char typemask,
5844 		     struct nfs4_stid **s, struct nfsd_net *nn)
5845 {
5846 	__be32 status;
5847 	bool return_revoked = false;
5848 
5849 	/*
5850 	 * Only return revoked delegations if explicitly asked;
5851 	 * otherwise we report revoked or bad_stateid status.
5852 	 */
5853 	if (typemask & NFS4_REVOKED_DELEG_STID)
5854 		return_revoked = true;
5855 	else if (typemask & NFS4_DELEG_STID)
5856 		typemask |= NFS4_REVOKED_DELEG_STID;
5857 
5858 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5859 		CLOSE_STATEID(stateid))
5860 		return nfserr_bad_stateid;
5861 	status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
5862 	if (status == nfserr_stale_clientid) {
5863 		if (cstate->session)
5864 			return nfserr_bad_stateid;
5865 		return nfserr_stale_stateid;
5866 	}
5867 	if (status)
5868 		return status;
5869 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
5870 	if (!*s)
5871 		return nfserr_bad_stateid;
5872 	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5873 		nfs4_put_stid(*s);
5874 		if (cstate->minorversion)
5875 			return nfserr_deleg_revoked;
5876 		return nfserr_bad_stateid;
5877 	}
5878 	return nfs_ok;
5879 }
5880 
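/*
 * Return a referenced nfsd_file backing @s: the cached delegation file for a
 * delegation stateid, or a file opened for the requested access for open and
 * lock stateids.  The caller must release it with nfsd_file_put().
 */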
5881 static struct nfsd_file *
5882 nfs4_find_file(struct nfs4_stid *s, int flags)
5883 {
5884 	if (!s)
5885 		return NULL;
5886 
5887 	switch (s->sc_type) {
5888 	case NFS4_DELEG_STID:
5889 		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5890 			return NULL;
5891 		return nfsd_file_get(s->sc_file->fi_deleg_file);
5892 	case NFS4_OPEN_STID:
5893 	case NFS4_LOCK_STID:
5894 		if (flags & RD_STATE)
5895 			return find_readable_file(s->sc_file);
5896 		else
5897 			return find_writeable_file(s->sc_file);
5898 	}
5899 
5900 	return NULL;
5901 }
5902 
5903 static __be32
5904 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5905 {
5906 	__be32 status;
5907 
5908 	status = nfsd4_check_openowner_confirmed(ols);
5909 	if (status)
5910 		return status;
5911 	return nfs4_check_openmode(ols, flags);
5912 }
5913 
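/*
 * Find (or open) a file to satisfy this I/O and check that the requester may
 * access it.  If the stateid does not supply a suitable file, fall back to
 * acquiring one from the file cache for the current filehandle.
 */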
5914 static __be32
5915 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5916 		struct nfsd_file **nfp, int flags)
5917 {
5918 	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5919 	struct nfsd_file *nf;
5920 	__be32 status;
5921 
5922 	nf = nfs4_find_file(s, flags);
5923 	if (nf) {
5924 		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5925 				acc | NFSD_MAY_OWNER_OVERRIDE);
5926 		if (status) {
5927 			nfsd_file_put(nf);
5928 			goto out;
5929 		}
5930 	} else {
5931 		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5932 		if (status)
5933 			return status;
5934 	}
5935 	*nfp = nf;
5936 out:
5937 	return status;
5938 }
5939 static void
5940 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5941 {
5942 	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
5943 	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
5944 		return;
5945 	list_del(&cps->cp_list);
5946 	idr_remove(&nn->s2s_cp_stateids,
5947 		   cps->cp_stateid.stid.si_opaque.so_id);
5948 	kfree(cps);
5949 }
5950 /*
5951  * A READ from an inter server to server COPY will have a
5952  * copy stateid. Look up the copy notify stateid from the
5953  * idr structure and take a reference on it.
5954  */
5955 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5956 			  struct nfs4_client *clp,
5957 			  struct nfs4_cpntf_state **cps)
5958 {
5959 	copy_stateid_t *cps_t;
5960 	struct nfs4_cpntf_state *state = NULL;
5961 
5962 	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
5963 		return nfserr_bad_stateid;
5964 	spin_lock(&nn->s2s_cp_lock);
5965 	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
5966 	if (cps_t) {
5967 		state = container_of(cps_t, struct nfs4_cpntf_state,
5968 				     cp_stateid);
5969 		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
5970 			state = NULL;
5971 			goto unlock;
5972 		}
5973 		if (!clp)
5974 			refcount_inc(&state->cp_stateid.sc_count);
5975 		else
5976 			_free_cpntf_state_locked(nn, state);
5977 	}
5978 unlock:
5979 	spin_unlock(&nn->s2s_cp_lock);
5980 	if (!state)
5981 		return nfserr_bad_stateid;
5982 	if (!clp && state)
5983 		*cps = state;
5984 	return 0;
5985 }
5986 
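/*
 * Resolve a copy-notify stateid (presented on a READ during an inter-server
 * COPY) back to the parent stateid it was derived from.
 */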
5987 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5988 			       struct nfs4_stid **stid)
5989 {
5990 	__be32 status;
5991 	struct nfs4_cpntf_state *cps = NULL;
5992 	struct nfs4_client *found;
5993 
5994 	status = manage_cpntf_state(nn, st, NULL, &cps);
5995 	if (status)
5996 		return status;
5997 
5998 	cps->cpntf_time = ktime_get_boottime_seconds();
5999 
6000 	status = nfserr_expired;
6001 	found = lookup_clientid(&cps->cp_p_clid, true, nn);
6002 	if (!found)
6003 		goto out;
6004 
6005 	*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6006 			NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6007 	if (*stid)
6008 		status = nfs_ok;
6009 	else
6010 		status = nfserr_bad_stateid;
6011 
6012 	put_client_renew(found);
6013 out:
6014 	nfs4_put_cpntf_state(nn, cps);
6015 	return status;
6016 }
6017 
6018 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6019 {
6020 	spin_lock(&nn->s2s_cp_lock);
6021 	_free_cpntf_state_locked(nn, cps);
6022 	spin_unlock(&nn->s2s_cp_lock);
6023 }
6024 
6025 /*
6026  * Checks for stateid operations
6027  */
6028 __be32
6029 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6030 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6031 		stateid_t *stateid, int flags, struct nfsd_file **nfp,
6032 		struct nfs4_stid **cstid)
6033 {
6034 	struct net *net = SVC_NET(rqstp);
6035 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6036 	struct nfs4_stid *s = NULL;
6037 	__be32 status;
6038 
6039 	if (nfp)
6040 		*nfp = NULL;
6041 
6042 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6043 		status = check_special_stateids(net, fhp, stateid, flags);
6044 		goto done;
6045 	}
6046 
6047 	status = nfsd4_lookup_stateid(cstate, stateid,
6048 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6049 				&s, nn);
6050 	if (status == nfserr_bad_stateid)
6051 		status = find_cpntf_state(nn, stateid, &s);
6052 	if (status)
6053 		return status;
6054 	status = nfsd4_stid_check_stateid_generation(stateid, s,
6055 			nfsd4_has_session(cstate));
6056 	if (status)
6057 		goto out;
6058 
6059 	switch (s->sc_type) {
6060 	case NFS4_DELEG_STID:
6061 		status = nfs4_check_delegmode(delegstateid(s), flags);
6062 		break;
6063 	case NFS4_OPEN_STID:
6064 	case NFS4_LOCK_STID:
6065 		status = nfs4_check_olstateid(openlockstateid(s), flags);
6066 		break;
6067 	default:
6068 		status = nfserr_bad_stateid;
6069 		break;
6070 	}
6071 	if (status)
6072 		goto out;
6073 	status = nfs4_check_fh(fhp, s);
6074 
6075 done:
6076 	if (status == nfs_ok && nfp)
6077 		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6078 out:
6079 	if (s) {
6080 		if (!status && cstid)
6081 			*cstid = s;
6082 		else
6083 			nfs4_put_stid(s);
6084 	}
6085 	return status;
6086 }
6087 
6088 /*
6089  * Test if the stateid is valid
6090  */
6091 __be32
6092 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6093 		   union nfsd4_op_u *u)
6094 {
6095 	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6096 	struct nfsd4_test_stateid_id *stateid;
6097 	struct nfs4_client *cl = cstate->clp;
6098 
6099 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6100 		stateid->ts_id_status =
6101 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6102 
6103 	return nfs_ok;
6104 }
6105 
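/*
 * FREE_STATEID handling for a lock stateid: it may only be released once the
 * lockowner no longer holds any locks on the file; otherwise the client gets
 * nfserr_locks_held.
 */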
6106 static __be32
6107 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6108 {
6109 	struct nfs4_ol_stateid *stp = openlockstateid(s);
6110 	__be32 ret;
6111 
6112 	ret = nfsd4_lock_ol_stateid(stp);
6113 	if (ret)
6114 		goto out_put_stid;
6115 
6116 	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6117 	if (ret)
6118 		goto out;
6119 
6120 	ret = nfserr_locks_held;
6121 	if (check_for_locks(stp->st_stid.sc_file,
6122 			    lockowner(stp->st_stateowner)))
6123 		goto out;
6124 
6125 	release_lock_stateid(stp);
6126 	ret = nfs_ok;
6127 
6128 out:
6129 	mutex_unlock(&stp->st_mutex);
6130 out_put_stid:
6131 	nfs4_put_stid(s);
6132 	return ret;
6133 }
6134 
6135 __be32
6136 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6137 		   union nfsd4_op_u *u)
6138 {
6139 	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6140 	stateid_t *stateid = &free_stateid->fr_stateid;
6141 	struct nfs4_stid *s;
6142 	struct nfs4_delegation *dp;
6143 	struct nfs4_client *cl = cstate->clp;
6144 	__be32 ret = nfserr_bad_stateid;
6145 
6146 	spin_lock(&cl->cl_lock);
6147 	s = find_stateid_locked(cl, stateid);
6148 	if (!s)
6149 		goto out_unlock;
6150 	spin_lock(&s->sc_lock);
6151 	switch (s->sc_type) {
6152 	case NFS4_DELEG_STID:
6153 		ret = nfserr_locks_held;
6154 		break;
6155 	case NFS4_OPEN_STID:
6156 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6157 		if (ret)
6158 			break;
6159 		ret = nfserr_locks_held;
6160 		break;
6161 	case NFS4_LOCK_STID:
6162 		spin_unlock(&s->sc_lock);
6163 		refcount_inc(&s->sc_count);
6164 		spin_unlock(&cl->cl_lock);
6165 		ret = nfsd4_free_lock_stateid(stateid, s);
6166 		goto out;
6167 	case NFS4_REVOKED_DELEG_STID:
6168 		spin_unlock(&s->sc_lock);
6169 		dp = delegstateid(s);
6170 		list_del_init(&dp->dl_recall_lru);
6171 		spin_unlock(&cl->cl_lock);
6172 		nfs4_put_stid(s);
6173 		ret = nfs_ok;
6174 		goto out;
6175 	/* Default falls through and returns nfserr_bad_stateid */
6176 	}
6177 	spin_unlock(&s->sc_lock);
6178 out_unlock:
6179 	spin_unlock(&cl->cl_lock);
6180 out:
6181 	return ret;
6182 }
6183 
6184 static inline int
6185 setlkflg(int type)
6186 {
6187 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6188 		RD_STATE : WR_STATE;
6189 }
6190 
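/*
 * Common checks for seqid-mutating operations: verify the owner's seqid, lock
 * the open/lock stateid, and confirm that the stateid generation and the
 * current filehandle match.  On success the stateid's mutex remains held.
 */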
6191 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6192 {
6193 	struct svc_fh *current_fh = &cstate->current_fh;
6194 	struct nfs4_stateowner *sop = stp->st_stateowner;
6195 	__be32 status;
6196 
6197 	status = nfsd4_check_seqid(cstate, sop, seqid);
6198 	if (status)
6199 		return status;
6200 	status = nfsd4_lock_ol_stateid(stp);
6201 	if (status != nfs_ok)
6202 		return status;
6203 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6204 	if (status == nfs_ok)
6205 		status = nfs4_check_fh(current_fh, &stp->st_stid);
6206 	if (status != nfs_ok)
6207 		mutex_unlock(&stp->st_mutex);
6208 	return status;
6209 }
6210 
6211 /*
6212  * Checks for sequence id mutating operations.
6213  */
6214 static __be32
6215 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6216 			 stateid_t *stateid, char typemask,
6217 			 struct nfs4_ol_stateid **stpp,
6218 			 struct nfsd_net *nn)
6219 {
6220 	__be32 status;
6221 	struct nfs4_stid *s;
6222 	struct nfs4_ol_stateid *stp = NULL;
6223 
6224 	trace_nfsd_preprocess(seqid, stateid);
6225 
6226 	*stpp = NULL;
6227 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6228 	if (status)
6229 		return status;
6230 	stp = openlockstateid(s);
6231 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6232 
6233 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6234 	if (!status)
6235 		*stpp = stp;
6236 	else
6237 		nfs4_put_stid(&stp->st_stid);
6238 	return status;
6239 }
6240 
6241 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6242 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6243 {
6244 	__be32 status;
6245 	struct nfs4_openowner *oo;
6246 	struct nfs4_ol_stateid *stp;
6247 
6248 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6249 						NFS4_OPEN_STID, &stp, nn);
6250 	if (status)
6251 		return status;
6252 	oo = openowner(stp->st_stateowner);
6253 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6254 		mutex_unlock(&stp->st_mutex);
6255 		nfs4_put_stid(&stp->st_stid);
6256 		return nfserr_bad_stateid;
6257 	}
6258 	*stpp = stp;
6259 	return nfs_ok;
6260 }
6261 
6262 __be32
6263 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6264 		   union nfsd4_op_u *u)
6265 {
6266 	struct nfsd4_open_confirm *oc = &u->open_confirm;
6267 	__be32 status;
6268 	struct nfs4_openowner *oo;
6269 	struct nfs4_ol_stateid *stp;
6270 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6271 
6272 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6273 			cstate->current_fh.fh_dentry);
6274 
6275 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6276 	if (status)
6277 		return status;
6278 
6279 	status = nfs4_preprocess_seqid_op(cstate,
6280 					oc->oc_seqid, &oc->oc_req_stateid,
6281 					NFS4_OPEN_STID, &stp, nn);
6282 	if (status)
6283 		goto out;
6284 	oo = openowner(stp->st_stateowner);
6285 	status = nfserr_bad_stateid;
6286 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6287 		mutex_unlock(&stp->st_mutex);
6288 		goto put_stateid;
6289 	}
6290 	oo->oo_flags |= NFS4_OO_CONFIRMED;
6291 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6292 	mutex_unlock(&stp->st_mutex);
6293 	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6294 	nfsd4_client_record_create(oo->oo_owner.so_client);
6295 	status = nfs_ok;
6296 put_stateid:
6297 	nfs4_put_stid(&stp->st_stid);
6298 out:
6299 	nfsd4_bump_seqid(cstate, status);
6300 	return status;
6301 }
6302 
6303 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6304 {
6305 	if (!test_access(access, stp))
6306 		return;
6307 	nfs4_file_put_access(stp->st_stid.sc_file, access);
6308 	clear_access(access, stp);
6309 }
6310 
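/* Drop whatever file access the stateid holds beyond the share mode it is downgraded to. */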
6311 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6312 {
6313 	switch (to_access) {
6314 	case NFS4_SHARE_ACCESS_READ:
6315 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6316 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6317 		break;
6318 	case NFS4_SHARE_ACCESS_WRITE:
6319 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6320 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6321 		break;
6322 	case NFS4_SHARE_ACCESS_BOTH:
6323 		break;
6324 	default:
6325 		WARN_ON_ONCE(1);
6326 	}
6327 }
6328 
6329 __be32
6330 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6331 		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6332 {
6333 	struct nfsd4_open_downgrade *od = &u->open_downgrade;
6334 	__be32 status;
6335 	struct nfs4_ol_stateid *stp;
6336 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6337 
6338 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6339 			cstate->current_fh.fh_dentry);
6340 
6341 	/* We don't yet support WANT bits: */
6342 	if (od->od_deleg_want)
6343 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6344 			od->od_deleg_want);
6345 
6346 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6347 					&od->od_stateid, &stp, nn);
6348 	if (status)
6349 		goto out;
6350 	status = nfserr_inval;
6351 	if (!test_access(od->od_share_access, stp)) {
6352 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6353 			stp->st_access_bmap, od->od_share_access);
6354 		goto put_stateid;
6355 	}
6356 	if (!test_deny(od->od_share_deny, stp)) {
6357 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6358 			stp->st_deny_bmap, od->od_share_deny);
6359 		goto put_stateid;
6360 	}
6361 	nfs4_stateid_downgrade(stp, od->od_share_access);
6362 	reset_union_bmap_deny(od->od_share_deny, stp);
6363 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6364 	status = nfs_ok;
6365 put_stateid:
6366 	mutex_unlock(&stp->st_mutex);
6367 	nfs4_put_stid(&stp->st_stid);
6368 out:
6369 	nfsd4_bump_seqid(cstate, status);
6370 	return status;
6371 }
6372 
6373 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
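/*
 * Unhash a stateid being CLOSEd.  NFSv4.1+ clients can drop it immediately;
 * for NFSv4.0 it is parked on the close LRU so a replayed CLOSE from the same
 * openowner can still be answered.
 */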
6374 {
6375 	struct nfs4_client *clp = s->st_stid.sc_client;
6376 	bool unhashed;
6377 	LIST_HEAD(reaplist);
6378 
6379 	spin_lock(&clp->cl_lock);
6380 	unhashed = unhash_open_stateid(s, &reaplist);
6381 
6382 	if (clp->cl_minorversion) {
6383 		if (unhashed)
6384 			put_ol_stateid_locked(s, &reaplist);
6385 		spin_unlock(&clp->cl_lock);
6386 		free_ol_stateid_reaplist(&reaplist);
6387 	} else {
6388 		spin_unlock(&clp->cl_lock);
6389 		free_ol_stateid_reaplist(&reaplist);
6390 		if (unhashed)
6391 			move_to_close_lru(s, clp->net);
6392 	}
6393 }
6394 
6395 /*
6396  * nfs4_unlock_state() called after encode
6397  */
6398 __be32
6399 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6400 		union nfsd4_op_u *u)
6401 {
6402 	struct nfsd4_close *close = &u->close;
6403 	__be32 status;
6404 	struct nfs4_ol_stateid *stp;
6405 	struct net *net = SVC_NET(rqstp);
6406 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6407 
6408 	dprintk("NFSD: nfsd4_close on file %pd\n",
6409 			cstate->current_fh.fh_dentry);
6410 
6411 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6412 					&close->cl_stateid,
6413 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
6414 					&stp, nn);
6415 	nfsd4_bump_seqid(cstate, status);
6416 	if (status)
6417 		goto out;
6418 
6419 	stp->st_stid.sc_type = NFS4_CLOSED_STID;
6420 
6421 	/*
6422 	 * Technically we don't _really_ have to increment or copy it, since
6423 	 * it should just be gone after this operation and we clobber the
6424 	 * copied value below, but we continue to do so here just to ensure
6425 	 * that racing ops see that there was a state change.
6426 	 */
6427 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6428 
6429 	nfsd4_close_open_stateid(stp);
6430 	mutex_unlock(&stp->st_mutex);
6431 
6432 	/* v4.1+ suggests that we send a special stateid in here, since the
6433 	 * clients should just ignore this anyway. Since this is not useful
6434 	 * for v4.0 clients either, we set it to the special close_stateid
6435 	 * universally.
6436 	 *
6437 	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6438 	 */
6439 	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6440 
6441 	/* put reference from nfs4_preprocess_seqid_op */
6442 	nfs4_put_stid(&stp->st_stid);
6443 out:
6444 	return status;
6445 }
6446 
6447 __be32
6448 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6449 		  union nfsd4_op_u *u)
6450 {
6451 	struct nfsd4_delegreturn *dr = &u->delegreturn;
6452 	struct nfs4_delegation *dp;
6453 	stateid_t *stateid = &dr->dr_stateid;
6454 	struct nfs4_stid *s;
6455 	__be32 status;
6456 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6457 
6458 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6459 		return status;
6460 
6461 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6462 	if (status)
6463 		goto out;
6464 	dp = delegstateid(s);
6465 	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6466 	if (status)
6467 		goto put_stateid;
6468 
6469 	destroy_delegation(dp);
6470 put_stateid:
6471 	nfs4_put_stid(&dp->dl_stid);
6472 out:
6473 	return status;
6474 }
6475 
6476 /* last octet in a range */
6477 static inline u64
6478 last_byte_offset(u64 start, u64 len)
6479 {
6480 	u64 end;
6481 
6482 	WARN_ON_ONCE(!len);
6483 	end = start + len;
6484 	return end > start ? end - 1: NFS4_MAX_UINT64;
6485 }
6486 
6487 /*
6488  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6489  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6490  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
6491  * locking, this prevents us from being completely protocol-compliant.  The
6492  * real solution to this problem is to start using unsigned file offsets in
6493  * the VFS, but this is a very deep change!
6494  */
6495 static inline void
6496 nfs4_transform_lock_offset(struct file_lock *lock)
6497 {
6498 	if (lock->fl_start < 0)
6499 		lock->fl_start = OFFSET_MAX;
6500 	if (lock->fl_end < 0)
6501 		lock->fl_end = OFFSET_MAX;
6502 }
6503 
6504 static fl_owner_t
6505 nfsd4_fl_get_owner(fl_owner_t owner)
6506 {
6507 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6508 
6509 	nfs4_get_stateowner(&lo->lo_owner);
6510 	return owner;
6511 }
6512 
6513 static void
6514 nfsd4_fl_put_owner(fl_owner_t owner)
6515 {
6516 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6517 
6518 	if (lo)
6519 		nfs4_put_stateowner(&lo->lo_owner);
6520 }
6521 
6522 static void
6523 nfsd4_lm_notify(struct file_lock *fl)
6524 {
6525 	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
6526 	struct net			*net = lo->lo_owner.so_client->net;
6527 	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
6528 	struct nfsd4_blocked_lock	*nbl = container_of(fl,
6529 						struct nfsd4_blocked_lock, nbl_lock);
6530 	bool queue = false;
6531 
6532 	/* An empty list means that something else is going to be using it */
6533 	spin_lock(&nn->blocked_locks_lock);
6534 	if (!list_empty(&nbl->nbl_list)) {
6535 		list_del_init(&nbl->nbl_list);
6536 		list_del_init(&nbl->nbl_lru);
6537 		queue = true;
6538 	}
6539 	spin_unlock(&nn->blocked_locks_lock);
6540 
6541 	if (queue) {
6542 		trace_nfsd_cb_notify_lock(lo, nbl);
6543 		nfsd4_run_cb(&nbl->nbl_cb);
6544 	}
6545 }
6546 
6547 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
6548 	.lm_notify = nfsd4_lm_notify,
6549 	.lm_get_owner = nfsd4_fl_get_owner,
6550 	.lm_put_owner = nfsd4_fl_put_owner,
6551 };
6552 
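/* Translate a conflicting file_lock into the LOCK4denied result returned to the client. */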
6553 static inline void
6554 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6555 {
6556 	struct nfs4_lockowner *lo;
6557 
6558 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6559 		lo = (struct nfs4_lockowner *) fl->fl_owner;
6560 		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6561 						GFP_KERNEL);
6562 		if (!deny->ld_owner.data)
6563 			/* We just don't care that much */
6564 			goto nevermind;
6565 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6566 	} else {
6567 nevermind:
6568 		deny->ld_owner.len = 0;
6569 		deny->ld_owner.data = NULL;
6570 		deny->ld_clientid.cl_boot = 0;
6571 		deny->ld_clientid.cl_id = 0;
6572 	}
6573 	deny->ld_start = fl->fl_start;
6574 	deny->ld_length = NFS4_MAX_UINT64;
6575 	if (fl->fl_end != NFS4_MAX_UINT64)
6576 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
6577 	deny->ld_type = NFS4_READ_LT;
6578 	if (fl->fl_type != F_RDLCK)
6579 		deny->ld_type = NFS4_WRITE_LT;
6580 }
6581 
6582 static struct nfs4_lockowner *
6583 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6584 {
6585 	unsigned int strhashval = ownerstr_hashval(owner);
6586 	struct nfs4_stateowner *so;
6587 
6588 	lockdep_assert_held(&clp->cl_lock);
6589 
6590 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6591 			    so_strhash) {
6592 		if (so->so_is_open_owner)
6593 			continue;
6594 		if (same_owner_str(so, owner))
6595 			return lockowner(nfs4_get_stateowner(so));
6596 	}
6597 	return NULL;
6598 }
6599 
6600 static struct nfs4_lockowner *
6601 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6602 {
6603 	struct nfs4_lockowner *lo;
6604 
6605 	spin_lock(&clp->cl_lock);
6606 	lo = find_lockowner_str_locked(clp, owner);
6607 	spin_unlock(&clp->cl_lock);
6608 	return lo;
6609 }
6610 
6611 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6612 {
6613 	unhash_lockowner_locked(lockowner(sop));
6614 }
6615 
6616 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6617 {
6618 	struct nfs4_lockowner *lo = lockowner(sop);
6619 
6620 	kmem_cache_free(lockowner_slab, lo);
6621 }
6622 
6623 static const struct nfs4_stateowner_operations lockowner_ops = {
6624 	.so_unhash =	nfs4_unhash_lockowner,
6625 	.so_free =	nfs4_free_lockowner,
6626 };
6627 
6628 /*
6629  * Alloc a lock owner structure.
6630  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
6631  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
6632  * occurred.
6633  * strhashval = ownerstr_hashval
6634  */
6635 static struct nfs4_lockowner *
6636 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6637 			   struct nfs4_ol_stateid *open_stp,
6638 			   struct nfsd4_lock *lock)
6639 {
6640 	struct nfs4_lockowner *lo, *ret;
6641 
6642 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6643 	if (!lo)
6644 		return NULL;
6645 	INIT_LIST_HEAD(&lo->lo_blocked);
6646 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6647 	lo->lo_owner.so_is_open_owner = 0;
6648 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6649 	lo->lo_owner.so_ops = &lockowner_ops;
6650 	spin_lock(&clp->cl_lock);
6651 	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6652 	if (ret == NULL) {
6653 		list_add(&lo->lo_owner.so_strhash,
6654 			 &clp->cl_ownerstr_hashtbl[strhashval]);
6655 		ret = lo;
6656 	} else
6657 		nfs4_free_stateowner(&lo->lo_owner);
6658 
6659 	spin_unlock(&clp->cl_lock);
6660 	return ret;
6661 }
6662 
6663 static struct nfs4_ol_stateid *
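/*
 * Look for an existing lock stateid held by this lockowner on the open
 * stateid's file and take a reference on it.  Caller must hold cl_lock.
 */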
6664 find_lock_stateid(const struct nfs4_lockowner *lo,
6665 		  const struct nfs4_ol_stateid *ost)
6666 {
6667 	struct nfs4_ol_stateid *lst;
6668 
6669 	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6670 
6671 	/* If ost is not hashed, ost->st_locks will not be valid */
6672 	if (!nfs4_ol_stateid_unhashed(ost))
6673 		list_for_each_entry(lst, &ost->st_locks, st_locks) {
6674 			if (lst->st_stateowner == &lo->lo_owner) {
6675 				refcount_inc(&lst->st_stid.sc_count);
6676 				return lst;
6677 			}
6678 		}
6679 	return NULL;
6680 }
6681 
6682 static struct nfs4_ol_stateid *
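/*
 * Initialize and hash a new lock stateid under the open stateid, unless a
 * racing thread already created one, in which case the existing stateid is
 * locked and returned instead.
 */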
6683 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6684 		  struct nfs4_file *fp, struct inode *inode,
6685 		  struct nfs4_ol_stateid *open_stp)
6686 {
6687 	struct nfs4_client *clp = lo->lo_owner.so_client;
6688 	struct nfs4_ol_stateid *retstp;
6689 
6690 	mutex_init(&stp->st_mutex);
6691 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6692 retry:
6693 	spin_lock(&clp->cl_lock);
6694 	if (nfs4_ol_stateid_unhashed(open_stp))
6695 		goto out_close;
6696 	retstp = find_lock_stateid(lo, open_stp);
6697 	if (retstp)
6698 		goto out_found;
6699 	refcount_inc(&stp->st_stid.sc_count);
6700 	stp->st_stid.sc_type = NFS4_LOCK_STID;
6701 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6702 	get_nfs4_file(fp);
6703 	stp->st_stid.sc_file = fp;
6704 	stp->st_access_bmap = 0;
6705 	stp->st_deny_bmap = open_stp->st_deny_bmap;
6706 	stp->st_openstp = open_stp;
6707 	spin_lock(&fp->fi_lock);
6708 	list_add(&stp->st_locks, &open_stp->st_locks);
6709 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6710 	list_add(&stp->st_perfile, &fp->fi_stateids);
6711 	spin_unlock(&fp->fi_lock);
6712 	spin_unlock(&clp->cl_lock);
6713 	return stp;
6714 out_found:
6715 	spin_unlock(&clp->cl_lock);
6716 	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6717 		nfs4_put_stid(&retstp->st_stid);
6718 		goto retry;
6719 	}
6720 	/* To keep mutex tracking happy */
6721 	mutex_unlock(&stp->st_mutex);
6722 	return retstp;
6723 out_close:
6724 	spin_unlock(&clp->cl_lock);
6725 	mutex_unlock(&stp->st_mutex);
6726 	return NULL;
6727 }
6728 
6729 static struct nfs4_ol_stateid *
6730 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6731 			    struct inode *inode, struct nfs4_ol_stateid *ost,
6732 			    bool *new)
6733 {
6734 	struct nfs4_stid *ns = NULL;
6735 	struct nfs4_ol_stateid *lst;
6736 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6737 	struct nfs4_client *clp = oo->oo_owner.so_client;
6738 
6739 	*new = false;
6740 	spin_lock(&clp->cl_lock);
6741 	lst = find_lock_stateid(lo, ost);
6742 	spin_unlock(&clp->cl_lock);
6743 	if (lst != NULL) {
6744 		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6745 			goto out;
6746 		nfs4_put_stid(&lst->st_stid);
6747 	}
6748 	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6749 	if (ns == NULL)
6750 		return NULL;
6751 
6752 	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6753 	if (lst == openlockstateid(ns))
6754 		*new = true;
6755 	else
6756 		nfs4_put_stid(ns);
6757 out:
6758 	return lst;
6759 }
6760 
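/*
 * Returns non-zero if the lock range is invalid: zero length, or a length
 * (other than the special "to end of file" value) that would overflow past
 * the end of the 64-bit offset space.
 */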
6761 static int
6762 check_lock_length(u64 offset, u64 length)
6763 {
6764 	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6765 		(length > ~offset)));
6766 }
6767 
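/* Make sure the lock stateid holds the file access its lock type needs; caller holds fi_lock. */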
6768 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6769 {
6770 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6771 
6772 	lockdep_assert_held(&fp->fi_lock);
6773 
6774 	if (test_access(access, lock_stp))
6775 		return;
6776 	__nfs4_file_get_access(fp, access);
6777 	set_access(access, lock_stp);
6778 }
6779 
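/*
 * Find or create the lockowner and lock stateid for a new-lockowner LOCK
 * request; *new is set when a fresh lock stateid had to be created.
 */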
6780 static __be32
6781 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6782 			    struct nfs4_ol_stateid *ost,
6783 			    struct nfsd4_lock *lock,
6784 			    struct nfs4_ol_stateid **plst, bool *new)
6785 {
6786 	__be32 status;
6787 	struct nfs4_file *fi = ost->st_stid.sc_file;
6788 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6789 	struct nfs4_client *cl = oo->oo_owner.so_client;
6790 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6791 	struct nfs4_lockowner *lo;
6792 	struct nfs4_ol_stateid *lst;
6793 	unsigned int strhashval;
6794 
6795 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
6796 	if (!lo) {
6797 		strhashval = ownerstr_hashval(&lock->lk_new_owner);
6798 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6799 		if (lo == NULL)
6800 			return nfserr_jukebox;
6801 	} else {
6802 		/* with an existing lockowner, seqids must be the same */
6803 		status = nfserr_bad_seqid;
6804 		if (!cstate->minorversion &&
6805 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6806 			goto out;
6807 	}
6808 
6809 	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6810 	if (lst == NULL) {
6811 		status = nfserr_jukebox;
6812 		goto out;
6813 	}
6814 
6815 	status = nfs_ok;
6816 	*plst = lst;
6817 out:
6818 	nfs4_put_stateowner(&lo->lo_owner);
6819 	return status;
6820 }
6821 
6822 /*
6823  *  LOCK operation
6824  */
6825 __be32
6826 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6827 	   union nfsd4_op_u *u)
6828 {
6829 	struct nfsd4_lock *lock = &u->lock;
6830 	struct nfs4_openowner *open_sop = NULL;
6831 	struct nfs4_lockowner *lock_sop = NULL;
6832 	struct nfs4_ol_stateid *lock_stp = NULL;
6833 	struct nfs4_ol_stateid *open_stp = NULL;
6834 	struct nfs4_file *fp;
6835 	struct nfsd_file *nf = NULL;
6836 	struct nfsd4_blocked_lock *nbl = NULL;
6837 	struct file_lock *file_lock = NULL;
6838 	struct file_lock *conflock = NULL;
6839 	struct super_block *sb;
6840 	__be32 status = 0;
6841 	int lkflg;
6842 	int err;
6843 	bool new = false;
6844 	unsigned char fl_type;
6845 	unsigned int fl_flags = FL_POSIX;
6846 	struct net *net = SVC_NET(rqstp);
6847 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6848 
6849 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6850 		(long long) lock->lk_offset,
6851 		(long long) lock->lk_length);
6852 
6853 	if (check_lock_length(lock->lk_offset, lock->lk_length))
6854 		 return nfserr_inval;
6855 
6856 	if ((status = fh_verify(rqstp, &cstate->current_fh,
6857 				S_IFREG, NFSD_MAY_LOCK))) {
6858 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
6859 		return status;
6860 	}
6861 	sb = cstate->current_fh.fh_dentry->d_sb;
6862 
6863 	if (lock->lk_is_new) {
6864 		if (nfsd4_has_session(cstate))
6865 			/* See rfc 5661 18.10.3: given clientid is ignored: */
6866 			memcpy(&lock->lk_new_clientid,
6867 				&cstate->clp->cl_clientid,
6868 				sizeof(clientid_t));
6869 
6870 		/* validate and update open stateid and open seqid */
6871 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
6872 					lock->lk_new_open_seqid,
6873 					&lock->lk_new_open_stateid,
6874 					&open_stp, nn);
6875 		if (status)
6876 			goto out;
6877 		mutex_unlock(&open_stp->st_mutex);
6878 		open_sop = openowner(open_stp->st_stateowner);
6879 		status = nfserr_bad_stateid;
6880 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6881 						&lock->lk_new_clientid))
6882 			goto out;
6883 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
6884 							&lock_stp, &new);
6885 	} else {
6886 		status = nfs4_preprocess_seqid_op(cstate,
6887 				       lock->lk_old_lock_seqid,
6888 				       &lock->lk_old_lock_stateid,
6889 				       NFS4_LOCK_STID, &lock_stp, nn);
6890 	}
6891 	if (status)
6892 		goto out;
6893 	lock_sop = lockowner(lock_stp->st_stateowner);
6894 
6895 	lkflg = setlkflg(lock->lk_type);
6896 	status = nfs4_check_openmode(lock_stp, lkflg);
6897 	if (status)
6898 		goto out;
6899 
6900 	status = nfserr_grace;
6901 	if (locks_in_grace(net) && !lock->lk_reclaim)
6902 		goto out;
6903 	status = nfserr_no_grace;
6904 	if (!locks_in_grace(net) && lock->lk_reclaim)
6905 		goto out;
6906 
6907 	if (lock->lk_reclaim)
6908 		fl_flags |= FL_RECLAIM;
6909 
6910 	fp = lock_stp->st_stid.sc_file;
6911 	switch (lock->lk_type) {
6912 		case NFS4_READW_LT:
6913 			if (nfsd4_has_session(cstate) &&
6914 			    !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
6915 				fl_flags |= FL_SLEEP;
6916 			fallthrough;
6917 		case NFS4_READ_LT:
6918 			spin_lock(&fp->fi_lock);
6919 			nf = find_readable_file_locked(fp);
6920 			if (nf)
6921 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6922 			spin_unlock(&fp->fi_lock);
6923 			fl_type = F_RDLCK;
6924 			break;
6925 		case NFS4_WRITEW_LT:
6926 			if (nfsd4_has_session(cstate) &&
6927 			    !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
6928 				fl_flags |= FL_SLEEP;
6929 			fallthrough;
6930 		case NFS4_WRITE_LT:
6931 			spin_lock(&fp->fi_lock);
6932 			nf = find_writeable_file_locked(fp);
6933 			if (nf)
6934 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6935 			spin_unlock(&fp->fi_lock);
6936 			fl_type = F_WRLCK;
6937 			break;
6938 		default:
6939 			status = nfserr_inval;
6940 		goto out;
6941 	}
6942 
6943 	if (!nf) {
6944 		status = nfserr_openmode;
6945 		goto out;
6946 	}
6947 
6948 	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6949 	if (!nbl) {
6950 		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6951 		status = nfserr_jukebox;
6952 		goto out;
6953 	}
6954 
6955 	file_lock = &nbl->nbl_lock;
6956 	file_lock->fl_type = fl_type;
6957 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6958 	file_lock->fl_pid = current->tgid;
6959 	file_lock->fl_file = nf->nf_file;
6960 	file_lock->fl_flags = fl_flags;
6961 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6962 	file_lock->fl_start = lock->lk_offset;
6963 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6964 	nfs4_transform_lock_offset(file_lock);
6965 
6966 	conflock = locks_alloc_lock();
6967 	if (!conflock) {
6968 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6969 		status = nfserr_jukebox;
6970 		goto out;
6971 	}
6972 
6973 	if (fl_flags & FL_SLEEP) {
6974 		nbl->nbl_time = ktime_get_boottime_seconds();
6975 		spin_lock(&nn->blocked_locks_lock);
6976 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6977 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6978 		spin_unlock(&nn->blocked_locks_lock);
6979 	}
6980 
6981 	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6982 	switch (err) {
6983 	case 0: /* success! */
6984 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6985 		status = 0;
6986 		if (lock->lk_reclaim)
6987 			nn->somebody_reclaimed = true;
6988 		break;
6989 	case FILE_LOCK_DEFERRED:
6990 		nbl = NULL;
6991 		fallthrough;
6992 	case -EAGAIN:		/* conflock holds conflicting lock */
6993 		status = nfserr_denied;
6994 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6995 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
6996 		break;
6997 	case -EDEADLK:
6998 		status = nfserr_deadlock;
6999 		break;
7000 	default:
7001 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7002 		status = nfserrno(err);
7003 		break;
7004 	}
7005 out:
7006 	if (nbl) {
7007 		/* dequeue it if we queued it before */
7008 		if (fl_flags & FL_SLEEP) {
7009 			spin_lock(&nn->blocked_locks_lock);
7010 			list_del_init(&nbl->nbl_list);
7011 			list_del_init(&nbl->nbl_lru);
7012 			spin_unlock(&nn->blocked_locks_lock);
7013 		}
7014 		free_blocked_lock(nbl);
7015 	}
7016 	if (nf)
7017 		nfsd_file_put(nf);
7018 	if (lock_stp) {
7019 		/* Bump seqid manually if the 4.0 replay owner is openowner */
7020 		/* Bump the seqid manually if the 4.0 replay owner is the openowner */
7021 		    cstate->replay_owner != &lock_sop->lo_owner &&
7022 		    seqid_mutating_err(ntohl(status)))
7023 			lock_sop->lo_owner.so_seqid++;
7024 
7025 		/*
7026 		 * If this is a new, never-before-used stateid, and we are
7027 		 * returning an error, then just go ahead and release it.
7028 		 */
7029 		if (status && new)
7030 			release_lock_stateid(lock_stp);
7031 
7032 		mutex_unlock(&lock_stp->st_mutex);
7033 
7034 		nfs4_put_stid(&lock_stp->st_stid);
7035 	}
7036 	if (open_stp)
7037 		nfs4_put_stid(&open_stp->st_stid);
7038 	nfsd4_bump_seqid(cstate, status);
7039 	if (conflock)
7040 		locks_free_lock(conflock);
7041 	return status;
7042 }
7043 
7044 /*
7045  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7046  * so we do a temporary open here just to get an open file to pass to
7047  * vfs_test_lock.
7048  */
7049 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7050 {
7051 	struct nfsd_file *nf;
7052 	__be32 err;
7053 
7054 	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7055 	if (err)
7056 		return err;
7057 	fh_lock(fhp); /* to block new leases till after test_lock: */
7058 	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
7059 							NFSD_MAY_READ));
7060 	if (err)
7061 		goto out;
7062 	lock->fl_file = nf->nf_file;
7063 	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7064 	lock->fl_file = NULL;
7065 out:
7066 	fh_unlock(fhp);
7067 	nfsd_file_put(nf);
7068 	return err;
7069 }
7070 
7071 /*
7072  * LOCKT operation
7073  */
7074 __be32
7075 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7076 	    union nfsd4_op_u *u)
7077 {
7078 	struct nfsd4_lockt *lockt = &u->lockt;
7079 	struct file_lock *file_lock = NULL;
7080 	struct nfs4_lockowner *lo = NULL;
7081 	__be32 status;
7082 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7083 
7084 	if (locks_in_grace(SVC_NET(rqstp)))
7085 		return nfserr_grace;
7086 
7087 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7088 		 return nfserr_inval;
7089 
7090 	if (!nfsd4_has_session(cstate)) {
7091 		status = set_client(&lockt->lt_clientid, cstate, nn);
7092 		if (status)
7093 			goto out;
7094 	}
7095 
7096 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7097 		goto out;
7098 
7099 	file_lock = locks_alloc_lock();
7100 	if (!file_lock) {
7101 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7102 		status = nfserr_jukebox;
7103 		goto out;
7104 	}
7105 
7106 	switch (lockt->lt_type) {
7107 		case NFS4_READ_LT:
7108 		case NFS4_READW_LT:
7109 			file_lock->fl_type = F_RDLCK;
7110 			break;
7111 		case NFS4_WRITE_LT:
7112 		case NFS4_WRITEW_LT:
7113 			file_lock->fl_type = F_WRLCK;
7114 			break;
7115 		default:
7116 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7117 			status = nfserr_inval;
7118 			goto out;
7119 	}
7120 
7121 	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7122 	if (lo)
7123 		file_lock->fl_owner = (fl_owner_t)lo;
7124 	file_lock->fl_pid = current->tgid;
7125 	file_lock->fl_flags = FL_POSIX;
7126 
7127 	file_lock->fl_start = lockt->lt_offset;
7128 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7129 
7130 	nfs4_transform_lock_offset(file_lock);
7131 
7132 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7133 	if (status)
7134 		goto out;
7135 
7136 	if (file_lock->fl_type != F_UNLCK) {
7137 		status = nfserr_denied;
7138 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7139 	}
7140 out:
7141 	if (lo)
7142 		nfs4_put_stateowner(&lo->lo_owner);
7143 	if (file_lock)
7144 		locks_free_lock(file_lock);
7145 	return status;
7146 }
7147 
7148 __be32
7149 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7150 	    union nfsd4_op_u *u)
7151 {
7152 	struct nfsd4_locku *locku = &u->locku;
7153 	struct nfs4_ol_stateid *stp;
7154 	struct nfsd_file *nf = NULL;
7155 	struct file_lock *file_lock = NULL;
7156 	__be32 status;
7157 	int err;
7158 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7159 
7160 	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7161 		(long long) locku->lu_offset,
7162 		(long long) locku->lu_length);
7163 
7164 	if (check_lock_length(locku->lu_offset, locku->lu_length))
7165 		 return nfserr_inval;
7166 
7167 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7168 					&locku->lu_stateid, NFS4_LOCK_STID,
7169 					&stp, nn);
7170 	if (status)
7171 		goto out;
7172 	nf = find_any_file(stp->st_stid.sc_file);
7173 	if (!nf) {
7174 		status = nfserr_lock_range;
7175 		goto put_stateid;
7176 	}
7177 	file_lock = locks_alloc_lock();
7178 	if (!file_lock) {
7179 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7180 		status = nfserr_jukebox;
7181 		goto put_file;
7182 	}
7183 
7184 	file_lock->fl_type = F_UNLCK;
7185 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7186 	file_lock->fl_pid = current->tgid;
7187 	file_lock->fl_file = nf->nf_file;
7188 	file_lock->fl_flags = FL_POSIX;
7189 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
7190 	file_lock->fl_start = locku->lu_offset;
7191 
7192 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
7193 						locku->lu_length);
7194 	nfs4_transform_lock_offset(file_lock);
7195 
7196 	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7197 	if (err) {
7198 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7199 		goto out_nfserr;
7200 	}
7201 	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7202 put_file:
7203 	nfsd_file_put(nf);
7204 put_stateid:
7205 	mutex_unlock(&stp->st_mutex);
7206 	nfs4_put_stid(&stp->st_stid);
7207 out:
7208 	nfsd4_bump_seqid(cstate, status);
7209 	if (file_lock)
7210 		locks_free_lock(file_lock);
7211 	return status;
7212 
7213 out_nfserr:
7214 	status = nfserrno(err);
7215 	goto put_file;
7216 }
7217 
7218 /*
7219  * returns
7220  * 	true:  locks held by lockowner
7221  * 	false: no locks held by lockowner
7222  */
7223 static bool
7224 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7225 {
7226 	struct file_lock *fl;
7227 	int status = false;
7228 	struct nfsd_file *nf = find_any_file(fp);
7229 	struct inode *inode;
7230 	struct file_lock_context *flctx;
7231 
7232 	if (!nf) {
7233 		/* Any valid lock stateid should have some sort of access */
7234 		WARN_ON_ONCE(1);
7235 		return status;
7236 	}
7237 
7238 	inode = locks_inode(nf->nf_file);
7239 	flctx = inode->i_flctx;
7240 
7241 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7242 		spin_lock(&flctx->flc_lock);
7243 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7244 			if (fl->fl_owner == (fl_owner_t)lowner) {
7245 				status = true;
7246 				break;
7247 			}
7248 		}
7249 		spin_unlock(&flctx->flc_lock);
7250 	}
7251 	nfsd_file_put(nf);
7252 	return status;
7253 }
7254 
7255 __be32
7256 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7257 			struct nfsd4_compound_state *cstate,
7258 			union nfsd4_op_u *u)
7259 {
7260 	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7261 	clientid_t *clid = &rlockowner->rl_clientid;
7262 	struct nfs4_stateowner *sop;
7263 	struct nfs4_lockowner *lo = NULL;
7264 	struct nfs4_ol_stateid *stp;
7265 	struct xdr_netobj *owner = &rlockowner->rl_owner;
7266 	unsigned int hashval = ownerstr_hashval(owner);
7267 	__be32 status;
7268 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7269 	struct nfs4_client *clp;
7270 	LIST_HEAD (reaplist);
7271 
7272 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7273 		clid->cl_boot, clid->cl_id);
7274 
7275 	status = set_client(clid, cstate, nn);
7276 	if (status)
7277 		return status;
7278 
7279 	clp = cstate->clp;
7280 	/* Find the matching lock stateowner */
7281 	spin_lock(&clp->cl_lock);
7282 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
7283 			    so_strhash) {
7284 
7285 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
7286 			continue;
7287 
7288 		/* see if there are still any locks associated with it */
7289 		lo = lockowner(sop);
7290 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
7291 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
7292 				status = nfserr_locks_held;
7293 				spin_unlock(&clp->cl_lock);
7294 				return status;
7295 			}
7296 		}
7297 
7298 		nfs4_get_stateowner(sop);
7299 		break;
7300 	}
7301 	if (!lo) {
7302 		spin_unlock(&clp->cl_lock);
7303 		return status;
7304 	}
7305 
7306 	unhash_lockowner_locked(lo);
7307 	while (!list_empty(&lo->lo_owner.so_stateids)) {
7308 		stp = list_first_entry(&lo->lo_owner.so_stateids,
7309 				       struct nfs4_ol_stateid,
7310 				       st_perstateowner);
7311 		WARN_ON(!unhash_lock_stateid(stp));
7312 		put_ol_stateid_locked(stp, &reaplist);
7313 	}
7314 	spin_unlock(&clp->cl_lock);
7315 	free_ol_stateid_reaplist(&reaplist);
7316 	remove_blocked_locks(lo);
7317 	nfs4_put_stateowner(&lo->lo_owner);
7318 
7319 	return status;
7320 }
7321 
7322 static inline struct nfs4_client_reclaim *
7323 alloc_reclaim(void)
7324 {
7325 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7326 }
7327 
7328 bool
7329 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7330 {
7331 	struct nfs4_client_reclaim *crp;
7332 
7333 	crp = nfsd4_find_reclaim_client(name, nn);
7334 	return (crp && crp->cr_clp);
7335 }
7336 
7337 /*
7338  * failure => all bets are off for recovery after a reset; the client will end up with nfserr_no_grace...
7339  *
7340  * The caller is responsible for freeing name.data if NULL is returned (it
7341  * will be freed in nfs4_remove_reclaim_record in the normal case).
7342  */
7343 struct nfs4_client_reclaim *
7344 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7345 		struct nfsd_net *nn)
7346 {
7347 	unsigned int strhashval;
7348 	struct nfs4_client_reclaim *crp;
7349 
7350 	crp = alloc_reclaim();
7351 	if (crp) {
7352 		strhashval = clientstr_hashval(name);
7353 		INIT_LIST_HEAD(&crp->cr_strhash);
7354 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7355 		crp->cr_name.data = name.data;
7356 		crp->cr_name.len = name.len;
7357 		crp->cr_princhash.data = princhash.data;
7358 		crp->cr_princhash.len = princhash.len;
7359 		crp->cr_clp = NULL;
7360 		nn->reclaim_str_hashtbl_size++;
7361 	}
7362 	return crp;
7363 }
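/*
 * A minimal, hypothetical caller sketch (not part of nfsd; the real callers
 * live in the client-tracking code) illustrating the ownership rule above:
 * name.data belongs to the reclaim record on success and is freed later by
 * nfs4_remove_reclaim_record(), but reverts to the caller on failure.
 */
#if 0	/* illustrative only */
static int record_reclaimable_client(struct xdr_netobj name,
				     struct nfsd_net *nn)
{
	struct xdr_netobj princhash = { .data = NULL, .len = 0 };

	if (!nfs4_client_to_reclaim(name, princhash, nn)) {
		kfree(name.data);	/* still the caller's on failure */
		return -ENOMEM;
	}
	return 0;			/* the record now owns name.data */
}
#endif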
7364 
7365 void
7366 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7367 {
7368 	list_del(&crp->cr_strhash);
7369 	kfree(crp->cr_name.data);
7370 	kfree(crp->cr_princhash.data);
7371 	kfree(crp);
7372 	nn->reclaim_str_hashtbl_size--;
7373 }
7374 
7375 void
7376 nfs4_release_reclaim(struct nfsd_net *nn)
7377 {
7378 	struct nfs4_client_reclaim *crp = NULL;
7379 	int i;
7380 
7381 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7382 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7383 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7384 			                struct nfs4_client_reclaim, cr_strhash);
7385 			nfs4_remove_reclaim_record(crp, nn);
7386 		}
7387 	}
7388 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7389 }
7390 
7391 /* called from OPEN, CLAIM_PREVIOUS with a new clientid. */
7393 struct nfs4_client_reclaim *
7394 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7395 {
7396 	unsigned int strhashval;
7397 	struct nfs4_client_reclaim *crp = NULL;
7398 
7399 	strhashval = clientstr_hashval(name);
7400 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7401 		if (compare_blob(&crp->cr_name, &name) == 0) {
7402 			return crp;
7403 		}
7404 	}
7405 	return NULL;
7406 }
7407 
7408 __be32
7409 nfs4_check_open_reclaim(struct nfs4_client *clp)
7410 {
7411 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
7412 		return nfserr_no_grace;
7413 
7414 	if (nfsd4_client_record_check(clp))
7415 		return nfserr_reclaim_bad;
7416 
7417 	return nfs_ok;
7418 }
7419 
7420 /*
7421  * Since the lifetime of a delegation isn't limited to that of an open, a
7422  * client may quite reasonably hang on to a delegation as long as it has
7423  * the inode cached.  This becomes an obvious problem the first time a
7424  * client's inode cache approaches the size of the server's total memory.
7425  *
7426  * For now we avoid this problem by imposing a hard limit on the number
7427  * of delegations, which varies according to the server's memory size.
7428  */
7429 static void
7430 set_max_delegations(void)
7431 {
7432 	/*
7433 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
7434 	 * estimates suggest that in the worst case (where every delegation
7435 	 * is for a different inode), a delegation could take about 1.5K,
7436 	 * giving a worst case usage of about 6% of memory.
7437 	 * giving a worst-case usage of about 6K per megabyte of RAM (~0.6%).
7438 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7439 }
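/*
 * A worked example of the shift above (a sketch, assuming 4K pages, i.e.
 * PAGE_SHIFT == 12): the shift count is 20 - 2 - 12 = 6, so a machine
 * reporting 262144 free buffer pages (1 GiB) gets 262144 >> 6 = 4096
 * delegations -- 4 per megabyte, as the comment says.
 */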
7440 
7441 static int nfs4_state_create_net(struct net *net)
7442 {
7443 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7444 	int i;
7445 
7446 	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7447 					    sizeof(struct list_head),
7448 					    GFP_KERNEL);
7449 	if (!nn->conf_id_hashtbl)
7450 		goto err;
7451 	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7452 					      sizeof(struct list_head),
7453 					      GFP_KERNEL);
7454 	if (!nn->unconf_id_hashtbl)
7455 		goto err_unconf_id;
7456 	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7457 					      sizeof(struct list_head),
7458 					      GFP_KERNEL);
7459 	if (!nn->sessionid_hashtbl)
7460 		goto err_sessionid;
7461 
7462 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7463 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7464 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7465 	}
7466 	for (i = 0; i < SESSION_HASH_SIZE; i++)
7467 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7468 	nn->conf_name_tree = RB_ROOT;
7469 	nn->unconf_name_tree = RB_ROOT;
7470 	nn->boot_time = ktime_get_real_seconds();
7471 	nn->grace_ended = false;
7472 	nn->nfsd4_manager.block_opens = true;
7473 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7474 	INIT_LIST_HEAD(&nn->client_lru);
7475 	INIT_LIST_HEAD(&nn->close_lru);
7476 	INIT_LIST_HEAD(&nn->del_recall_lru);
7477 	spin_lock_init(&nn->client_lock);
7478 	spin_lock_init(&nn->s2s_cp_lock);
7479 	idr_init(&nn->s2s_cp_stateids);
7480 
7481 	spin_lock_init(&nn->blocked_locks_lock);
7482 	INIT_LIST_HEAD(&nn->blocked_locks_lru);
7483 
7484 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7485 	get_net(net);
7486 
7487 	return 0;
7488 
7489 err_sessionid:
7490 	kfree(nn->unconf_id_hashtbl);
7491 err_unconf_id:
7492 	kfree(nn->conf_id_hashtbl);
7493 err:
7494 	return -ENOMEM;
7495 }
7496 
7497 static void
7498 nfs4_state_destroy_net(struct net *net)
7499 {
7500 	int i;
7501 	struct nfs4_client *clp = NULL;
7502 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7503 
7504 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7505 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
7506 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7507 			destroy_client(clp);
7508 		}
7509 	}
7510 
7511 	WARN_ON(!list_empty(&nn->blocked_locks_lru));
7512 
7513 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7514 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7515 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7516 			destroy_client(clp);
7517 		}
7518 	}
7519 
7520 	kfree(nn->sessionid_hashtbl);
7521 	kfree(nn->unconf_id_hashtbl);
7522 	kfree(nn->conf_id_hashtbl);
7523 	put_net(net);
7524 }
7525 
7526 int
7527 nfs4_state_start_net(struct net *net)
7528 {
7529 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7530 	int ret;
7531 
7532 	ret = nfs4_state_create_net(net);
7533 	if (ret)
7534 		return ret;
7535 	locks_start_grace(net, &nn->nfsd4_manager);
7536 	nfsd4_client_tracking_init(net);
7537 	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7538 		goto skip_grace;
7539 	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
7540 	       nn->nfsd4_grace, net->ns.inum);
7541 	trace_nfsd_grace_start(nn);
7542 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7543 	return 0;
7544 
7545 skip_grace:
7546 	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7547 			net->ns.inum);
7548 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7549 	nfsd4_end_grace(nn);
7550 	return 0;
7551 }
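/*
 * Roughly, assuming the default 90-second lease and grace times: with
 * reclaimable clients on record, the laundromat is first scheduled after the
 * grace period (nfsd4_grace * HZ); when the tracking backend reports nothing
 * to reclaim, grace ends immediately and the laundromat is scheduled one
 * lease period (nfsd4_lease * HZ) out instead.
 */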
7552 
7553 /* initialization to perform when the nfsd service is started: */
7554 
7555 int
7556 nfs4_state_start(void)
7557 {
7558 	int ret;
7559 
7560 	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7561 	if (laundry_wq == NULL) {
7562 		ret = -ENOMEM;
7563 		goto out;
7564 	}
7565 	ret = nfsd4_create_callback_queue();
7566 	if (ret)
7567 		goto out_free_laundry;
7568 
7569 	set_max_delegations();
7570 	return 0;
7571 
7572 out_free_laundry:
7573 	destroy_workqueue(laundry_wq);
7574 out:
7575 	return ret;
7576 }
7577 
7578 void
7579 nfs4_state_shutdown_net(struct net *net)
7580 {
7581 	struct nfs4_delegation *dp = NULL;
7582 	struct list_head *pos, *next, reaplist;
7583 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7584 
7585 	cancel_delayed_work_sync(&nn->laundromat_work);
7586 	locks_end_grace(&nn->nfsd4_manager);
7587 
7588 	INIT_LIST_HEAD(&reaplist);
7589 	spin_lock(&state_lock);
7590 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
7591 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7592 		WARN_ON(!unhash_delegation_locked(dp));
7593 		list_add(&dp->dl_recall_lru, &reaplist);
7594 	}
7595 	spin_unlock(&state_lock);
7596 	list_for_each_safe(pos, next, &reaplist) {
7597 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7598 		list_del_init(&dp->dl_recall_lru);
7599 		destroy_unhashed_deleg(dp);
7600 	}
7601 
7602 	nfsd4_client_tracking_exit(net);
7603 	nfs4_state_destroy_net(net);
7604 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
7605 	nfsd4_ssc_shutdown_umount(nn);
7606 #endif
7607 }
7608 
7609 void
7610 nfs4_state_shutdown(void)
7611 {
7612 	destroy_workqueue(laundry_wq);
7613 	nfsd4_destroy_callback_queue();
7614 }
7615 
7616 static void
7617 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7618 {
7619 	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
7620 	    CURRENT_STATEID(stateid))
7621 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7622 }
7623 
7624 static void
7625 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7626 {
7627 	if (cstate->minorversion) {
7628 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7629 		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7630 	}
7631 }
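/*
 * Together get_stateid() and put_stateid() implement the "current stateid"
 * used on NFSv4.1 and later (minorversion != 0): put_stateid() records the
 * stateid an op just produced, and get_stateid() substitutes it when a later
 * op in the same COMPOUND passes the reserved current-stateid value
 * (CURRENT_STATEID()).  A sketch of a typical COMPOUND:
 *
 *	OPEN			-> nfsd4_set_openstateid() saves op_stateid
 *	WRITE (current stateid)	-> nfsd4_get_writestateid() fills wr_stateid
 *	CLOSE (current stateid)	-> nfsd4_get_closestateid() fills cl_stateid
 */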
7632 
7633 void
7634 clear_current_stateid(struct nfsd4_compound_state *cstate)
7635 {
7636 	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7637 }
7638 
7639 /*
7640  * functions to set current state id
7641  */
7642 void
7643 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7644 		union nfsd4_op_u *u)
7645 {
7646 	put_stateid(cstate, &u->open_downgrade.od_stateid);
7647 }
7648 
7649 void
7650 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7651 		union nfsd4_op_u *u)
7652 {
7653 	put_stateid(cstate, &u->open.op_stateid);
7654 }
7655 
7656 void
7657 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7658 		union nfsd4_op_u *u)
7659 {
7660 	put_stateid(cstate, &u->close.cl_stateid);
7661 }
7662 
7663 void
7664 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7665 		union nfsd4_op_u *u)
7666 {
7667 	put_stateid(cstate, &u->lock.lk_resp_stateid);
7668 }
7669 
7670 /*
7671  * functions to consume current state id
7672  */
7673 
7674 void
7675 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7676 		union nfsd4_op_u *u)
7677 {
7678 	get_stateid(cstate, &u->open_downgrade.od_stateid);
7679 }
7680 
7681 void
7682 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7683 		union nfsd4_op_u *u)
7684 {
7685 	get_stateid(cstate, &u->delegreturn.dr_stateid);
7686 }
7687 
7688 void
7689 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7690 		union nfsd4_op_u *u)
7691 {
7692 	get_stateid(cstate, &u->free_stateid.fr_stateid);
7693 }
7694 
7695 void
7696 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7697 		union nfsd4_op_u *u)
7698 {
7699 	get_stateid(cstate, &u->setattr.sa_stateid);
7700 }
7701 
7702 void
7703 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7704 		union nfsd4_op_u *u)
7705 {
7706 	get_stateid(cstate, &u->close.cl_stateid);
7707 }
7708 
7709 void
7710 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7711 		union nfsd4_op_u *u)
7712 {
7713 	get_stateid(cstate, &u->locku.lu_stateid);
7714 }
7715 
7716 void
7717 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7718 		union nfsd4_op_u *u)
7719 {
7720 	get_stateid(cstate, &u->read.rd_stateid);
7721 }
7722 
7723 void
7724 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7725 		union nfsd4_op_u *u)
7726 {
7727 	get_stateid(cstate, &u->write.wr_stateid);
7728 }
7729
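/*
 * None of the getters/setters above are called directly from this file; the
 * nfsd4_ops[] table in nfs4proc.c wires them up per operation, roughly as in
 * this sketch (field names assumed from struct nfsd4_operation; see
 * nfs4proc.c for the authoritative table):
 *
 *	[OP_CLOSE] = {
 *		.op_func = nfsd4_close,
 *		...
 *		.op_get_currentstateid = nfsd4_get_closestateid,
 *		.op_set_currentstateid = nfsd4_set_closestateid,
 *	},
 */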