/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
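
/*
 * Editorial note: the all-zero and all-ones stateids above correspond to
 * the "special stateids" of the NFSv4 specs (RFC 7530 and RFC 5661): the
 * anonymous stateid and the READ-bypass stateid, which clients may send
 * in place of a real stateid and which the server must therefore never
 * hand out itself.
 */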

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table. In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{
	int rc = 0;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL)
		rc = -ENOMEM;
	return rc;
}

void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
					    struct nfs4_client *clp)
{
	if (clp->cl_state != NFSD4_ACTIVE)
		atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
		  struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
		       struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	trace_nfsd_cb_notify_lock_done(&zero_stateid, task);

	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used. This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
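
/*
 * Worked example (editorial, not part of the original source): after
 *
 *	unsigned long bmap = 0;
 *	__set_bit(NFS4_SHARE_ACCESS_READ, &bmap);	// OPEN #1: READ
 *	__set_bit(NFS4_SHARE_ACCESS_WRITE, &bmap);	// OPEN #2: WRITE
 *
 * bmap_to_share_mode(bmap) returns 1 | 2 == NFS4_SHARE_ACCESS_BOTH, so a
 * later downgrade to BOTH is "explicable" by the previous opens even
 * though no single OPEN requested BOTH.
 */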

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			       struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	if (refcount_dec_and_test(&fi->fi_ref)) {
		nfsd4_file_hash_remove(fi);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_rw_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	if (!ret) {
		ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
		if (!ret)
			ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
	lockdep_assert_held(&f->fi_lock);

	if (f->fi_fds[O_RDWR])
		return f->fi_fds[O_RDWR];
	if (f->fi_fds[O_WRONLY])
		return f->fi_fds[O_WRONLY];
	if (f->fi_fds[O_RDONLY])
		return f->fi_fds[O_RDONLY];
	return NULL;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;

static const struct rhashtable_params nfs4_file_rhash_params = {
	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
	.key_offset		= offsetof(struct nfs4_file, fi_inode),
	.head_offset		= offsetof(struct nfs4_file, fi_rlist),

	/*
	 * Start with a single page hash table to reduce resizing churn
	 * on light workloads.
	 */
	.min_size		= 256,
	.automatic_shrinking	= true,
};

/*
 * Check if courtesy clients have conflicting access and resolve it if possible
 *
 * access:	is op_share_access if share_access is true.
 *		Check if access mode, op_share_access, would conflict with
 *		the current deny mode of the file 'fp'.
 * access:	is op_share_deny if share_access is false.
 *		Check if the deny mode, op_share_deny, would conflict with
 *		current access of the file 'fp'.
 * stp:		skip checking this entry.
 * new_stp:	normal open, not open upgrade.
 *
 * Function returns:
 *	false - access/deny mode conflict with normal client.
 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		/* ignore lock stateid */
		if (st->st_openstp)
			continue;
		if (st == stp && new_stp)
			continue;
		/* check file access against deny mode or vice versa */
		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}
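
/*
 * Illustrative scenario (editorial): a fresh OPEN for WRITE against a
 * file whose only deny-WRITE stateid belongs to a courtesy client
 * returns true after kicking the laundromat to expire that client; if
 * any conflicting stateid belongs to an active client, this returns
 * false and the caller fails the open with NFS4ERR_SHARE_DENIED.
 */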

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	stid->cs_type = cs_type;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
		return;
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	struct nfs4_delegation *dp = delegstateid(stid);

	WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
	WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
	WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
	WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.  This results in delegations for a
 * given filehandle being blocked for between 30 and 60 seconds.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
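
/*
 * Illustrative sketch (editorial): a filehandle whose 32-bit jhash is
 * 0x00BBCCDD is recorded by setting bits 0xDD, 0xCC and 0xBB of the
 * "new" 256-bit filter; delegation_blocked() reports a hit only when
 * all three bits are set in the same filter, so a false positive needs
 * three simultaneous collisions.
 */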

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			bd->new = 1-bd->new;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_raw, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&fp->fi_fhandle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = dl_type;
	dp->dl_retries = 1;
	dp->dl_recalled = false;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	trace_nfsd_stid_revoke(&dp->dl_stid);

	if (clp->cl_minorversion) {
		spin_lock(&clp->cl_lock);
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	WARN_ON(!list_empty(&stid->sc_cp_list));
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
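
/*
 * Worked example (editorial): with ca->maxresp_cached == 1024 and
 * NFSD_MIN_HDR_SEQ_SZ == 80, slot_bytes() returns 944 +
 * sizeof(struct nfsd4_slot): enough for the cached reply body plus the
 * slot bookkeeping, but nothing for the headers we regenerate anyway.
 */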
1773
1774 /*
1775 * XXX: If we run out of reserved DRC memory we could (up to a point)
1776 * re-negotiate active sessions and reduce their slot usage to make
1777 * room for new connections. For now we just fail the create session.
1778 */
1779 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1780 {
1781 u32 slotsize = slot_bytes(ca);
1782 u32 num = ca->maxreqs;
1783 unsigned long avail, total_avail;
1784 unsigned int scale_factor;
1785
1786 spin_lock(&nfsd_drc_lock);
1787 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1788 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1789 else
1790 /* We have handed out more space than we chose in
1791 * set_max_drc() to allow. That isn't really a
1792 * problem as long as that doesn't make us think we
1793 * have lots more due to integer overflow.
1794 */
1795 total_avail = 0;
1796 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1797 /*
1798 * Never use more than a fraction of the remaining memory,
1799 * unless it's the only way to give this client a slot.
1800 * The chosen fraction is either 1/8 or 1/number of threads,
1801 * whichever is smaller. This ensures there are adequate
1802 * slots to support multiple clients per thread.
1803 * Give the client one slot even if that would require
1804 * over-allocation--it is better than failure.
1805 */
1806 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1807
1808 avail = clamp_t(unsigned long, avail, slotsize,
1809 total_avail/scale_factor);
1810 num = min_t(int, num, avail / slotsize);
1811 num = max_t(int, num, 1);
1812 nfsd_drc_mem_used += num * slotsize;
1813 spin_unlock(&nfsd_drc_lock);
1814
1815 return num;
1816 }
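/*
 * A sketch of the sizing above, with assumed numbers: if total_avail is
 * 8MB and the server runs 4 nfsd threads, scale_factor = max(8, 4) = 8,
 * so a single session may claim at most 8MB / 8 = 1MB (and never more
 * than NFSD_MAX_MEM_PER_SESSION). With a 2KB slotsize that caps the
 * session at 512 slots, while the clamp still guarantees at least one
 * slot however tight memory gets.
 */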
1817
1818 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1819 {
1820 int slotsize = slot_bytes(ca);
1821
1822 spin_lock(&nfsd_drc_lock);
1823 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1824 spin_unlock(&nfsd_drc_lock);
1825 }
1826
1827 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1828 struct nfsd4_channel_attrs *battrs)
1829 {
1830 int numslots = fattrs->maxreqs;
1831 int slotsize = slot_bytes(fattrs);
1832 struct nfsd4_session *new;
1833 int i;
1834
1835 BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
1836 > PAGE_SIZE);
1837
1838 new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
1839 if (!new)
1840 return NULL;
1841 /* allocate each struct nfsd4_slot and data cache in one piece */
1842 for (i = 0; i < numslots; i++) {
1843 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1844 if (!new->se_slots[i])
1845 goto out_free;
1846 }
1847
1848 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1849 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1850
1851 return new;
1852 out_free:
1853 while (i--)
1854 kfree(new->se_slots[i]);
1855 kfree(new);
1856 return NULL;
1857 }
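/*
 * se_slots[] is a flexible array of pointers, so struct_size() above
 * computes the session header plus numslots pointer entries in one
 * overflow-checked expression. The slots themselves (struct nfsd4_slot
 * plus its embedded reply cache) are allocated separately in the loop;
 * the BUILD_BUG_ON only asserts that a maximally-sized header still
 * fits in a single page.
 */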
1858
1859 static void free_conn(struct nfsd4_conn *c)
1860 {
1861 svc_xprt_put(c->cn_xprt);
1862 kfree(c);
1863 }
1864
1865 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1866 {
1867 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1868 struct nfs4_client *clp = c->cn_session->se_client;
1869
1870 trace_nfsd_cb_lost(clp);
1871
1872 spin_lock(&clp->cl_lock);
1873 if (!list_empty(&c->cn_persession)) {
1874 list_del(&c->cn_persession);
1875 free_conn(c);
1876 }
1877 nfsd4_probe_callback(clp);
1878 spin_unlock(&clp->cl_lock);
1879 }
1880
1881 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1882 {
1883 struct nfsd4_conn *conn;
1884
1885 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1886 if (!conn)
1887 return NULL;
1888 svc_xprt_get(rqstp->rq_xprt);
1889 conn->cn_xprt = rqstp->rq_xprt;
1890 conn->cn_flags = flags;
1891 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1892 return conn;
1893 }
1894
1895 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1896 {
1897 conn->cn_session = ses;
1898 list_add(&conn->cn_persession, &ses->se_conns);
1899 }
1900
1901 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1902 {
1903 struct nfs4_client *clp = ses->se_client;
1904
1905 spin_lock(&clp->cl_lock);
1906 __nfsd4_hash_conn(conn, ses);
1907 spin_unlock(&clp->cl_lock);
1908 }
1909
1910 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1911 {
1912 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1913 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1914 }
1915
1916 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1917 {
1918 int ret;
1919
1920 nfsd4_hash_conn(conn, ses);
1921 ret = nfsd4_register_conn(conn);
1922 if (ret)
1923 /* oops; xprt is already down: */
1924 nfsd4_conn_lost(&conn->cn_xpt_user);
1925 /* We may have gained or lost a callback channel: */
1926 nfsd4_probe_callback_sync(ses->se_client);
1927 }
1928
1929 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1930 {
1931 u32 dir = NFS4_CDFC4_FORE;
1932
1933 if (cses->flags & SESSION4_BACK_CHAN)
1934 dir |= NFS4_CDFC4_BACK;
1935 return alloc_conn(rqstp, dir);
1936 }
1937
1938 /* must be called under client_lock */
1939 static void nfsd4_del_conns(struct nfsd4_session *s)
1940 {
1941 struct nfs4_client *clp = s->se_client;
1942 struct nfsd4_conn *c;
1943
1944 spin_lock(&clp->cl_lock);
1945 while (!list_empty(&s->se_conns)) {
1946 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1947 list_del_init(&c->cn_persession);
1948 spin_unlock(&clp->cl_lock);
1949
1950 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1951 free_conn(c);
1952
1953 spin_lock(&clp->cl_lock);
1954 }
1955 spin_unlock(&clp->cl_lock);
1956 }
1957
1958 static void __free_session(struct nfsd4_session *ses)
1959 {
1960 free_session_slots(ses);
1961 kfree(ses);
1962 }
1963
1964 static void free_session(struct nfsd4_session *ses)
1965 {
1966 nfsd4_del_conns(ses);
1967 nfsd4_put_drc_mem(&ses->se_fchannel);
1968 __free_session(ses);
1969 }
1970
1971 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1972 {
1973 int idx;
1974 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1975
1976 new->se_client = clp;
1977 gen_sessionid(new);
1978
1979 INIT_LIST_HEAD(&new->se_conns);
1980
1981 new->se_cb_seq_nr = 1;
1982 new->se_flags = cses->flags;
1983 new->se_cb_prog = cses->callback_prog;
1984 new->se_cb_sec = cses->cb_sec;
1985 atomic_set(&new->se_ref, 0);
1986 idx = hash_sessionid(&new->se_sessionid);
1987 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1988 spin_lock(&clp->cl_lock);
1989 list_add(&new->se_perclnt, &clp->cl_sessions);
1990 spin_unlock(&clp->cl_lock);
1991
1992 {
1993 struct sockaddr *sa = svc_addr(rqstp);
1994 /*
1995 * This is a little silly; with sessions there's no real
1996 * use for the callback address. Use the peer address
1997 * as a reasonable default for now, but consider fixing
1998 * the rpc client not to require an address in the
1999 * future:
2000 */
2001 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2002 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2003 }
2004 }
2005
2006 /* caller must hold client_lock */
2007 static struct nfsd4_session *
2008 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2009 {
2010 struct nfsd4_session *elem;
2011 int idx;
2012 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2013
2014 lockdep_assert_held(&nn->client_lock);
2015
2016 dump_sessionid(__func__, sessionid);
2017 idx = hash_sessionid(sessionid);
2018 /* Search in the appropriate list */
2019 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2020 if (!memcmp(elem->se_sessionid.data, sessionid->data,
2021 NFS4_MAX_SESSIONID_LEN)) {
2022 return elem;
2023 }
2024 }
2025
2026 dprintk("%s: session not found\n", __func__);
2027 return NULL;
2028 }
2029
2030 static struct nfsd4_session *
2031 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2032 __be32 *ret)
2033 {
2034 struct nfsd4_session *session;
2035 __be32 status = nfserr_badsession;
2036
2037 session = __find_in_sessionid_hashtbl(sessionid, net);
2038 if (!session)
2039 goto out;
2040 status = nfsd4_get_session_locked(session);
2041 if (status)
2042 session = NULL;
2043 out:
2044 *ret = status;
2045 return session;
2046 }
2047
2048 /* caller must hold client_lock */
2049 static void
2050 unhash_session(struct nfsd4_session *ses)
2051 {
2052 struct nfs4_client *clp = ses->se_client;
2053 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2054
2055 lockdep_assert_held(&nn->client_lock);
2056
2057 list_del(&ses->se_hash);
2058 spin_lock(&ses->se_client->cl_lock);
2059 list_del(&ses->se_perclnt);
2060 spin_unlock(&ses->se_client->cl_lock);
2061 }
2062
2063 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2064 static int
2065 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2066 {
2067 /*
2068 * We're assuming the clid was not given out from a boot
2069 * precisely 2^32 (about 136 years) before this one. That seems
2070 * a safe assumption:
2071 */
2072 if (clid->cl_boot == (u32)nn->boot_time)
2073 return 0;
2074 trace_nfsd_clid_stale(clid);
2075 return 1;
2076 }
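/*
 * Note the deliberate truncation: gen_clid() below stores the low 32
 * bits of boot_time in cl_boot, so this comparison flags a clientid as
 * stale exactly when it was minted by a different server instance.
 */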
2077
2078 /*
2079 * XXX Should we use a slab cache?
2080 * This type of memory management is somewhat inefficient, but we use it
2081 * anyway since SETCLIENTID is not a common operation.
2082 */
2083 static struct nfs4_client *alloc_client(struct xdr_netobj name,
2084 struct nfsd_net *nn)
2085 {
2086 struct nfs4_client *clp;
2087 int i;
2088
2089 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2090 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2091 return NULL;
2092 }
2093 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2094 if (clp == NULL)
2095 return NULL;
2096 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2097 if (clp->cl_name.data == NULL)
2098 goto err_no_name;
2099 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2100 sizeof(struct list_head),
2101 GFP_KERNEL);
2102 if (!clp->cl_ownerstr_hashtbl)
2103 goto err_no_hashtbl;
2104 for (i = 0; i < OWNER_HASH_SIZE; i++)
2105 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2106 INIT_LIST_HEAD(&clp->cl_sessions);
2107 idr_init(&clp->cl_stateids);
2108 atomic_set(&clp->cl_rpc_users, 0);
2109 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2110 clp->cl_state = NFSD4_ACTIVE;
2111 atomic_inc(&nn->nfs4_client_count);
2112 atomic_set(&clp->cl_delegs_in_recall, 0);
2113 INIT_LIST_HEAD(&clp->cl_idhash);
2114 INIT_LIST_HEAD(&clp->cl_openowners);
2115 INIT_LIST_HEAD(&clp->cl_delegations);
2116 INIT_LIST_HEAD(&clp->cl_lru);
2117 INIT_LIST_HEAD(&clp->cl_revoked);
2118 #ifdef CONFIG_NFSD_PNFS
2119 INIT_LIST_HEAD(&clp->cl_lo_states);
2120 #endif
2121 INIT_LIST_HEAD(&clp->async_copies);
2122 spin_lock_init(&clp->async_lock);
2123 spin_lock_init(&clp->cl_lock);
2124 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2125 return clp;
2126 err_no_hashtbl:
2127 kfree(clp->cl_name.data);
2128 err_no_name:
2129 kmem_cache_free(client_slab, clp);
2130 return NULL;
2131 }
2132
2133 static void __free_client(struct kref *k)
2134 {
2135 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2136 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2137
2138 free_svc_cred(&clp->cl_cred);
2139 kfree(clp->cl_ownerstr_hashtbl);
2140 kfree(clp->cl_name.data);
2141 kfree(clp->cl_nii_domain.data);
2142 kfree(clp->cl_nii_name.data);
2143 idr_destroy(&clp->cl_stateids);
2144 kfree(clp->cl_ra);
2145 kmem_cache_free(client_slab, clp);
2146 }
2147
2148 static void drop_client(struct nfs4_client *clp)
2149 {
2150 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2151 }
2152
2153 static void
2154 free_client(struct nfs4_client *clp)
2155 {
2156 while (!list_empty(&clp->cl_sessions)) {
2157 struct nfsd4_session *ses;
2158 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2159 se_perclnt);
2160 list_del(&ses->se_perclnt);
2161 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2162 free_session(ses);
2163 }
2164 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2165 if (clp->cl_nfsd_dentry) {
2166 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2167 clp->cl_nfsd_dentry = NULL;
2168 wake_up_all(&expiry_wq);
2169 }
2170 drop_client(clp);
2171 }
2172
2173 /* must be called under the client_lock */
2174 static void
2175 unhash_client_locked(struct nfs4_client *clp)
2176 {
2177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2178 struct nfsd4_session *ses;
2179
2180 lockdep_assert_held(&nn->client_lock);
2181
2182 /* Mark the client as expired! */
2183 clp->cl_time = 0;
2184 /* Make it invisible */
2185 if (!list_empty(&clp->cl_idhash)) {
2186 list_del_init(&clp->cl_idhash);
2187 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2188 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2189 else
2190 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2191 }
2192 list_del_init(&clp->cl_lru);
2193 spin_lock(&clp->cl_lock);
2194 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2195 list_del_init(&ses->se_hash);
2196 spin_unlock(&clp->cl_lock);
2197 }
2198
2199 static void
2200 unhash_client(struct nfs4_client *clp)
2201 {
2202 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2203
2204 spin_lock(&nn->client_lock);
2205 unhash_client_locked(clp);
2206 spin_unlock(&nn->client_lock);
2207 }
2208
2209 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2210 {
2211 if (atomic_read(&clp->cl_rpc_users))
2212 return nfserr_jukebox;
2213 unhash_client_locked(clp);
2214 return nfs_ok;
2215 }
2216
2217 static void
2218 __destroy_client(struct nfs4_client *clp)
2219 {
2220 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2221 int i;
2222 struct nfs4_openowner *oo;
2223 struct nfs4_delegation *dp;
2224 struct list_head reaplist;
2225
2226 INIT_LIST_HEAD(&reaplist);
2227 spin_lock(&state_lock);
2228 while (!list_empty(&clp->cl_delegations)) {
2229 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2230 WARN_ON(!unhash_delegation_locked(dp));
2231 list_add(&dp->dl_recall_lru, &reaplist);
2232 }
2233 spin_unlock(&state_lock);
2234 while (!list_empty(&reaplist)) {
2235 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2236 list_del_init(&dp->dl_recall_lru);
2237 destroy_unhashed_deleg(dp);
2238 }
2239 while (!list_empty(&clp->cl_revoked)) {
2240 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2241 list_del_init(&dp->dl_recall_lru);
2242 nfs4_put_stid(&dp->dl_stid);
2243 }
2244 while (!list_empty(&clp->cl_openowners)) {
2245 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2246 nfs4_get_stateowner(&oo->oo_owner);
2247 release_openowner(oo);
2248 }
2249 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2250 struct nfs4_stateowner *so, *tmp;
2251
2252 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2253 so_strhash) {
2254 /* Should be no openowners at this point */
2255 WARN_ON_ONCE(so->so_is_open_owner);
2256 remove_blocked_locks(lockowner(so));
2257 }
2258 }
2259 nfsd4_return_all_client_layouts(clp);
2260 nfsd4_shutdown_copy(clp);
2261 nfsd4_shutdown_callback(clp);
2262 if (clp->cl_cb_conn.cb_xprt)
2263 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2264 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2265 nfsd4_dec_courtesy_client_count(nn, clp);
2266 free_client(clp);
2267 wake_up_all(&expiry_wq);
2268 }
2269
2270 static void
2271 destroy_client(struct nfs4_client *clp)
2272 {
2273 unhash_client(clp);
2274 __destroy_client(clp);
2275 }
2276
2277 static void inc_reclaim_complete(struct nfs4_client *clp)
2278 {
2279 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2280
2281 if (!nn->track_reclaim_completes)
2282 return;
2283 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2284 return;
2285 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2286 nn->reclaim_str_hashtbl_size) {
2287 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2288 clp->net->ns.inum);
2289 nfsd4_end_grace(nn);
2290 }
2291 }
2292
2293 static void expire_client(struct nfs4_client *clp)
2294 {
2295 unhash_client(clp);
2296 nfsd4_client_record_remove(clp);
2297 __destroy_client(clp);
2298 }
2299
2300 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2301 {
2302 memcpy(target->cl_verifier.data, source->data,
2303 sizeof(target->cl_verifier.data));
2304 }
2305
2306 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2307 {
2308 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2309 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2310 }
2311
2312 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2313 {
2314 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2315 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2316 GFP_KERNEL);
2317 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2318 if ((source->cr_principal && !target->cr_principal) ||
2319 (source->cr_raw_principal && !target->cr_raw_principal) ||
2320 (source->cr_targ_princ && !target->cr_targ_princ))
2321 return -ENOMEM;
2322
2323 target->cr_flavor = source->cr_flavor;
2324 target->cr_uid = source->cr_uid;
2325 target->cr_gid = source->cr_gid;
2326 target->cr_group_info = source->cr_group_info;
2327 get_group_info(target->cr_group_info);
2328 target->cr_gss_mech = source->cr_gss_mech;
2329 if (source->cr_gss_mech)
2330 gss_mech_get(source->cr_gss_mech);
2331 return 0;
2332 }
2333
2334 static int
2335 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2336 {
2337 if (o1->len < o2->len)
2338 return -1;
2339 if (o1->len > o2->len)
2340 return 1;
2341 return memcmp(o1->data, o2->data, o1->len);
2342 }
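/*
 * compare_blob() orders netobjs by length first and content second.
 * That is not a lexicographic string order, but any cheap total order
 * suffices for the cl_namenode rbtrees maintained by
 * add_clp_to_name_tree() and find_clp_in_name_tree() below.
 */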
2343
2344 static int
2345 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2346 {
2347 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2348 }
2349
2350 static int
2351 same_clid(clientid_t *cl1, clientid_t *cl2)
2352 {
2353 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2354 }
2355
2356 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2357 {
2358 int i;
2359
2360 if (g1->ngroups != g2->ngroups)
2361 return false;
2362 for (i=0; i<g1->ngroups; i++)
2363 if (!gid_eq(g1->gid[i], g2->gid[i]))
2364 return false;
2365 return true;
2366 }
2367
2368 /*
2369 * RFC 3530 language requires clid_inuse be returned when the
2370 * "principal" associated with a requests differs from that previously
2371 * used. We use uid, gid's, and gss principal string as our best
2372 * approximation. We also don't want to allow non-gss use of a client
2373 * established using gss: in theory cr_principal should catch that
2374 * change, but in practice cr_principal can be null even in the gss case
2375 * since gssd doesn't always pass down a principal string.
2376 */
2377 static bool is_gss_cred(struct svc_cred *cr)
2378 {
2379 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2380 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2381 }
2382
2383
2384 static bool
2385 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2386 {
2387 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2388 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2389 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2390 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2391 return false;
2392 /* XXX: check that cr_targ_princ fields match ? */
2393 if (cr1->cr_principal == cr2->cr_principal)
2394 return true;
2395 if (!cr1->cr_principal || !cr2->cr_principal)
2396 return false;
2397 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2398 }
2399
2400 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2401 {
2402 struct svc_cred *cr = &rqstp->rq_cred;
2403 u32 service;
2404
2405 if (!cr->cr_gss_mech)
2406 return false;
2407 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2408 return service == RPC_GSS_SVC_INTEGRITY ||
2409 service == RPC_GSS_SVC_PRIVACY;
2410 }
2411
2412 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2413 {
2414 struct svc_cred *cr = &rqstp->rq_cred;
2415
2416 if (!cl->cl_mach_cred)
2417 return true;
2418 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2419 return false;
2420 if (!svc_rqst_integrity_protected(rqstp))
2421 return false;
2422 if (cl->cl_cred.cr_raw_principal)
2423 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2424 cr->cr_raw_principal);
2425 if (!cr->cr_principal)
2426 return false;
2427 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2428 }
2429
2430 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2431 {
2432 __be32 verf[2];
2433
2434 /*
2435 * This is opaque to the client, so no need to byte-swap. Use
2436 * __force to keep sparse happy.
2437 */
2438 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2439 verf[1] = (__force __be32)nn->clverifier_counter++;
2440 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2441 }
2442
2443 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2444 {
2445 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2446 clp->cl_clientid.cl_id = nn->clientid_counter++;
2447 gen_confirm(clp, nn);
2448 }
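/*
 * A clientid is therefore the pair { 32-bit boot time, per-net
 * counter }, which is exactly what STALE_CLIENTID() checks after a
 * server reboot; the confirm verifier generated alongside it is the
 * value the client must echo back in SETCLIENTID_CONFIRM.
 */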
2449
2450 static struct nfs4_stid *
2451 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2452 {
2453 struct nfs4_stid *ret;
2454
2455 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2456 if (!ret || !ret->sc_type)
2457 return NULL;
2458 return ret;
2459 }
2460
2461 static struct nfs4_stid *
2462 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2463 {
2464 struct nfs4_stid *s;
2465
2466 spin_lock(&cl->cl_lock);
2467 s = find_stateid_locked(cl, t);
2468 if (s != NULL) {
2469 if (typemask & s->sc_type)
2470 refcount_inc(&s->sc_count);
2471 else
2472 s = NULL;
2473 }
2474 spin_unlock(&cl->cl_lock);
2475 return s;
2476 }
2477
2478 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2479 {
2480 struct nfsdfs_client *nc;
2481 nc = get_nfsdfs_client(inode);
2482 if (!nc)
2483 return NULL;
2484 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2485 }
2486
2487 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2488 {
2489 seq_printf(m, "\"");
2490 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2491 seq_printf(m, "\"");
2492 }
2493
2494 static const char *cb_state2str(int state)
2495 {
2496 switch (state) {
2497 case NFSD4_CB_UP:
2498 return "UP";
2499 case NFSD4_CB_UNKNOWN:
2500 return "UNKNOWN";
2501 case NFSD4_CB_DOWN:
2502 return "DOWN";
2503 case NFSD4_CB_FAULT:
2504 return "FAULT";
2505 }
2506 return "UNDEFINED";
2507 }
2508
2509 static int client_info_show(struct seq_file *m, void *v)
2510 {
2511 struct inode *inode = file_inode(m->file);
2512 struct nfs4_client *clp;
2513 u64 clid;
2514
2515 clp = get_nfsdfs_clp(inode);
2516 if (!clp)
2517 return -ENXIO;
2518 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2519 seq_printf(m, "clientid: 0x%llx\n", clid);
2520 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2521
2522 if (clp->cl_state == NFSD4_COURTESY)
2523 seq_puts(m, "status: courtesy\n");
2524 else if (clp->cl_state == NFSD4_EXPIRABLE)
2525 seq_puts(m, "status: expirable\n");
2526 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2527 seq_puts(m, "status: confirmed\n");
2528 else
2529 seq_puts(m, "status: unconfirmed\n");
2530 seq_printf(m, "seconds from last renew: %lld\n",
2531 ktime_get_boottime_seconds() - clp->cl_time);
2532 seq_printf(m, "name: ");
2533 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2534 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2535 if (clp->cl_nii_domain.data) {
2536 seq_printf(m, "Implementation domain: ");
2537 seq_quote_mem(m, clp->cl_nii_domain.data,
2538 clp->cl_nii_domain.len);
2539 seq_printf(m, "\nImplementation name: ");
2540 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2541 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2542 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2543 }
2544 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2545 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2546 drop_client(clp);
2547
2548 return 0;
2549 }
2550
2551 DEFINE_SHOW_ATTRIBUTE(client_info);
2552
2553 static void *states_start(struct seq_file *s, loff_t *pos)
2554 __acquires(&clp->cl_lock)
2555 {
2556 struct nfs4_client *clp = s->private;
2557 unsigned long id = *pos;
2558 void *ret;
2559
2560 spin_lock(&clp->cl_lock);
2561 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2562 *pos = id;
2563 return ret;
2564 }
2565
2566 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2567 {
2568 struct nfs4_client *clp = s->private;
2569 unsigned long id = *pos;
2570 void *ret;
2571
2573 id++;
2574 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2575 *pos = id;
2576 return ret;
2577 }
2578
2579 static void states_stop(struct seq_file *s, void *v)
2580 __releases(&clp->cl_lock)
2581 {
2582 struct nfs4_client *clp = s->private;
2583
2584 spin_unlock(&clp->cl_lock);
2585 }
2586
2587 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2588 {
2589 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2590 }
2591
2592 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2593 {
2594 struct inode *inode = file_inode(f->nf_file);
2595
2596 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2597 MAJOR(inode->i_sb->s_dev),
2598 MINOR(inode->i_sb->s_dev),
2599 inode->i_ino);
2600 }
2601
2602 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2603 {
2604 seq_printf(s, "owner: ");
2605 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2606 }
2607
2608 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2609 {
2610 seq_printf(s, "0x%.8x", stid->si_generation);
2611 seq_printf(s, "%12phN", &stid->si_opaque);
2612 }
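/*
 * The two seq_printf() calls above render the 4-byte generation and
 * the 12 opaque bytes as one contiguous hex string, e.g. (a made-up
 * stateid) "0x00000001aabbccddeeff001122334455".
 */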
2613
2614 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2615 {
2616 struct nfs4_ol_stateid *ols;
2617 struct nfs4_file *nf;
2618 struct nfsd_file *file;
2619 struct nfs4_stateowner *oo;
2620 unsigned int access, deny;
2621
2622 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2623 return 0; /* XXX: or SEQ_SKIP? */
2624 ols = openlockstateid(st);
2625 oo = ols->st_stateowner;
2626 nf = st->sc_file;
2627
2628 spin_lock(&nf->fi_lock);
2629 file = find_any_file_locked(nf);
2630 if (!file)
2631 goto out;
2632
2633 seq_printf(s, "- ");
2634 nfs4_show_stateid(s, &st->sc_stateid);
2635 seq_printf(s, ": { type: open, ");
2636
2637 access = bmap_to_share_mode(ols->st_access_bmap);
2638 deny = bmap_to_share_mode(ols->st_deny_bmap);
2639
2640 seq_printf(s, "access: %s%s, ",
2641 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2642 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2643 seq_printf(s, "deny: %s%s, ",
2644 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2645 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2646
2647 nfs4_show_superblock(s, file);
2648 seq_printf(s, ", ");
2649 nfs4_show_fname(s, file);
2650 seq_printf(s, ", ");
2651 nfs4_show_owner(s, oo);
2652 seq_printf(s, " }\n");
2653 out:
2654 spin_unlock(&nf->fi_lock);
2655 return 0;
2656 }
2657
2658 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2659 {
2660 struct nfs4_ol_stateid *ols;
2661 struct nfs4_file *nf;
2662 struct nfsd_file *file;
2663 struct nfs4_stateowner *oo;
2664
2665 ols = openlockstateid(st);
2666 oo = ols->st_stateowner;
2667 nf = st->sc_file;
2668 spin_lock(&nf->fi_lock);
2669 file = find_any_file_locked(nf);
2670 if (!file)
2671 goto out;
2672
2673 seq_printf(s, "- ");
2674 nfs4_show_stateid(s, &st->sc_stateid);
2675 seq_printf(s, ": { type: lock, ");
2676
2677 /*
2678 * Note: a lock stateid isn't really the same thing as a lock,
2679 * it's the locking state held by one owner on a file, and there
2680 * may be multiple (or no) lock ranges associated with it.
2681 * (The same is true of open stateids.)
2682 */
2683
2684 nfs4_show_superblock(s, file);
2685 /* XXX: open stateid? */
2686 seq_printf(s, ", ");
2687 nfs4_show_fname(s, file);
2688 seq_printf(s, ", ");
2689 nfs4_show_owner(s, oo);
2690 seq_printf(s, " }\n");
2691 out:
2692 spin_unlock(&nf->fi_lock);
2693 return 0;
2694 }
2695
2696 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2697 {
2698 struct nfs4_delegation *ds;
2699 struct nfs4_file *nf;
2700 struct nfsd_file *file;
2701
2702 ds = delegstateid(st);
2703 nf = st->sc_file;
2704 spin_lock(&nf->fi_lock);
2705 file = nf->fi_deleg_file;
2706 if (!file)
2707 goto out;
2708
2709 seq_printf(s, "- ");
2710 nfs4_show_stateid(s, &st->sc_stateid);
2711 seq_printf(s, ": { type: deleg, ");
2712
2713 /* Kinda dead code as long as we only support read delegs: */
2714 seq_printf(s, "access: %s, ",
2715 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2716
2717 /* XXX: lease time, whether it's being recalled. */
2718
2719 nfs4_show_superblock(s, file);
2720 seq_printf(s, ", ");
2721 nfs4_show_fname(s, file);
2722 seq_printf(s, " }\n");
2723 out:
2724 spin_unlock(&nf->fi_lock);
2725 return 0;
2726 }
2727
2728 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2729 {
2730 struct nfs4_layout_stateid *ls;
2731 struct nfsd_file *file;
2732
2733 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2734 file = ls->ls_file;
2735
2736 seq_printf(s, "- ");
2737 nfs4_show_stateid(s, &st->sc_stateid);
2738 seq_printf(s, ": { type: layout, ");
2739
2740 /* XXX: What else would be useful? */
2741
2742 nfs4_show_superblock(s, file);
2743 seq_printf(s, ", ");
2744 nfs4_show_fname(s, file);
2745 seq_printf(s, " }\n");
2746
2747 return 0;
2748 }
2749
2750 static int states_show(struct seq_file *s, void *v)
2751 {
2752 struct nfs4_stid *st = v;
2753
2754 switch (st->sc_type) {
2755 case NFS4_OPEN_STID:
2756 return nfs4_show_open(s, st);
2757 case NFS4_LOCK_STID:
2758 return nfs4_show_lock(s, st);
2759 case NFS4_DELEG_STID:
2760 return nfs4_show_deleg(s, st);
2761 case NFS4_LAYOUT_STID:
2762 return nfs4_show_layout(s, st);
2763 default:
2764 return 0; /* XXX: or SEQ_SKIP? */
2765 }
2766 /* XXX: copy stateids? */
2767 }
2768
2769 static struct seq_operations states_seq_ops = {
2770 .start = states_start,
2771 .next = states_next,
2772 .stop = states_stop,
2773 .show = states_show
2774 };
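/*
 * This is the standard seq_file iterator pattern: states_start() and
 * states_next() walk the cl_stateids IDR under cl_lock, using the
 * stateid ID itself as the cursor in *pos, so the walk resumes
 * correctly even if entries are added or removed between reads.
 */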
2775
2776 static int client_states_open(struct inode *inode, struct file *file)
2777 {
2778 struct seq_file *s;
2779 struct nfs4_client *clp;
2780 int ret;
2781
2782 clp = get_nfsdfs_clp(inode);
2783 if (!clp)
2784 return -ENXIO;
2785
2786 ret = seq_open(file, &states_seq_ops);
2787 if (ret)
2788 return ret;
2789 s = file->private_data;
2790 s->private = clp;
2791 return 0;
2792 }
2793
2794 static int client_opens_release(struct inode *inode, struct file *file)
2795 {
2796 struct seq_file *m = file->private_data;
2797 struct nfs4_client *clp = m->private;
2798
2799 /* XXX: alternatively, we could get/drop in seq start/stop */
2800 drop_client(clp);
2801 return seq_release(inode, file);
2802 }
2803
2804 static const struct file_operations client_states_fops = {
2805 .open = client_states_open,
2806 .read = seq_read,
2807 .llseek = seq_lseek,
2808 .release = client_opens_release,
2809 };
2810
2811 /*
2812 * Normally we refuse to destroy clients that are in use, but here the
2813 * administrator is telling us to just do it. We also want to wait
2814 * so the caller has a guarantee that the client's locks are gone by
2815 * the time the write returns:
2816 */
2817 static void force_expire_client(struct nfs4_client *clp)
2818 {
2819 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2820 bool already_expired;
2821
2822 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2823
2824 spin_lock(&nn->client_lock);
2825 clp->cl_time = 0;
2826 spin_unlock(&nn->client_lock);
2827
2828 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2829 spin_lock(&nn->client_lock);
2830 already_expired = list_empty(&clp->cl_lru);
2831 if (!already_expired)
2832 unhash_client_locked(clp);
2833 spin_unlock(&nn->client_lock);
2834
2835 if (!already_expired)
2836 expire_client(clp);
2837 else
2838 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2839 }
2840
2841 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2842 size_t size, loff_t *pos)
2843 {
2844 char *data;
2845 struct nfs4_client *clp;
2846
2847 data = simple_transaction_get(file, buf, size);
2848 if (IS_ERR(data))
2849 return PTR_ERR(data);
2850 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2851 return -EINVAL;
2852 clp = get_nfsdfs_clp(file_inode(file));
2853 if (!clp)
2854 return -ENXIO;
2855 force_expire_client(clp);
2856 drop_client(clp);
2857 return 7;
2858 }
2859
2860 static const struct file_operations client_ctl_fops = {
2861 .write = client_ctl_write,
2862 .release = simple_transaction_release,
2863 };
2864
2865 static const struct tree_descr client_files[] = {
2866 [0] = {"info", &client_info_fops, S_IRUSR},
2867 [1] = {"states", &client_states_fops, S_IRUSR},
2868 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2869 [3] = {""},
2870 };
2871
2872 static int
2873 nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
2874 struct rpc_task *task)
2875 {
2876 trace_nfsd_cb_recall_any_done(cb, task);
2877 switch (task->tk_status) {
2878 case -NFS4ERR_DELAY:
2879 rpc_delay(task, 2 * HZ);
2880 return 0;
2881 default:
2882 return 1;
2883 }
2884 }
2885
2886 static void
2887 nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
2888 {
2889 struct nfs4_client *clp = cb->cb_clp;
2890
2891 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
2892 drop_client(clp);
2893 }
2894
2895 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
2896 .done = nfsd4_cb_recall_any_done,
2897 .release = nfsd4_cb_recall_any_release,
2898 };
2899
2900 static struct nfs4_client *create_client(struct xdr_netobj name,
2901 struct svc_rqst *rqstp, nfs4_verifier *verf)
2902 {
2903 struct nfs4_client *clp;
2904 struct sockaddr *sa = svc_addr(rqstp);
2905 int ret;
2906 struct net *net = SVC_NET(rqstp);
2907 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2908 struct dentry *dentries[ARRAY_SIZE(client_files)];
2909
2910 clp = alloc_client(name, nn);
2911 if (clp == NULL)
2912 return NULL;
2913
2914 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2915 if (ret) {
2916 free_client(clp);
2917 return NULL;
2918 }
2919 gen_clid(clp, nn);
2920 kref_init(&clp->cl_nfsdfs.cl_ref);
2921 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2922 clp->cl_time = ktime_get_boottime_seconds();
2923 clear_bit(0, &clp->cl_cb_slot_busy);
2924 copy_verf(clp, verf);
2925 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2926 clp->cl_cb_session = NULL;
2927 clp->net = net;
2928 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2929 nn, &clp->cl_nfsdfs,
2930 clp->cl_clientid.cl_id - nn->clientid_base,
2931 client_files, dentries);
2932 clp->cl_nfsd_info_dentry = dentries[0];
2933 if (!clp->cl_nfsd_dentry) {
2934 free_client(clp);
2935 return NULL;
2936 }
2937 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
2938 if (!clp->cl_ra) {
2939 free_client(clp);
2940 return NULL;
2941 }
2942 clp->cl_ra_time = 0;
2943 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
2944 NFSPROC4_CLNT_CB_RECALL_ANY);
2945 return clp;
2946 }
2947
2948 static void
2949 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2950 {
2951 struct rb_node **new = &(root->rb_node), *parent = NULL;
2952 struct nfs4_client *clp;
2953
2954 while (*new) {
2955 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2956 parent = *new;
2957
2958 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2959 new = &((*new)->rb_left);
2960 else
2961 new = &((*new)->rb_right);
2962 }
2963
2964 rb_link_node(&new_clp->cl_namenode, parent, new);
2965 rb_insert_color(&new_clp->cl_namenode, root);
2966 }
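/*
 * This is the canonical rbtree insertion walk: descend to a NULL child
 * pointer, ordering by compare_blob(), then splice the node in with
 * rb_link_node() and rebalance with rb_insert_color(). Equal keys
 * simply descend right, so callers are expected to find and unhash any
 * same-named client first, as nfsd4_exchange_id() does.
 */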
2967
2968 static struct nfs4_client *
2969 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2970 {
2971 int cmp;
2972 struct rb_node *node = root->rb_node;
2973 struct nfs4_client *clp;
2974
2975 while (node) {
2976 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2977 cmp = compare_blob(&clp->cl_name, name);
2978 if (cmp > 0)
2979 node = node->rb_left;
2980 else if (cmp < 0)
2981 node = node->rb_right;
2982 else
2983 return clp;
2984 }
2985 return NULL;
2986 }
2987
2988 static void
2989 add_to_unconfirmed(struct nfs4_client *clp)
2990 {
2991 unsigned int idhashval;
2992 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2993
2994 lockdep_assert_held(&nn->client_lock);
2995
2996 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2997 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2998 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2999 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3000 renew_client_locked(clp);
3001 }
3002
3003 static void
3004 move_to_confirmed(struct nfs4_client *clp)
3005 {
3006 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3007 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3008
3009 lockdep_assert_held(&nn->client_lock);
3010
3011 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3012 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3013 add_clp_to_name_tree(clp, &nn->conf_name_tree);
3014 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3015 trace_nfsd_clid_confirmed(&clp->cl_clientid);
3016 renew_client_locked(clp);
3017 }
3018
3019 static struct nfs4_client *
3020 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
3021 {
3022 struct nfs4_client *clp;
3023 unsigned int idhashval = clientid_hashval(clid->cl_id);
3024
3025 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3026 if (same_clid(&clp->cl_clientid, clid)) {
3027 if ((bool)clp->cl_minorversion != sessions)
3028 return NULL;
3029 renew_client_locked(clp);
3030 return clp;
3031 }
3032 }
3033 return NULL;
3034 }
3035
3036 static struct nfs4_client *
3037 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3038 {
3039 struct list_head *tbl = nn->conf_id_hashtbl;
3040
3041 lockdep_assert_held(&nn->client_lock);
3042 return find_client_in_id_table(tbl, clid, sessions);
3043 }
3044
3045 static struct nfs4_client *
3046 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3047 {
3048 struct list_head *tbl = nn->unconf_id_hashtbl;
3049
3050 lockdep_assert_held(&nn->client_lock);
3051 return find_client_in_id_table(tbl, clid, sessions);
3052 }
3053
3054 static bool clp_used_exchangeid(struct nfs4_client *clp)
3055 {
3056 return clp->cl_exchange_flags != 0;
3057 }
3058
3059 static struct nfs4_client *
3060 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3061 {
3062 lockdep_assert_held(&nn->client_lock);
3063 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3064 }
3065
3066 static struct nfs4_client *
3067 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3068 {
3069 lockdep_assert_held(&nn->client_lock);
3070 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3071 }
3072
3073 static void
3074 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3075 {
3076 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3077 struct sockaddr *sa = svc_addr(rqstp);
3078 u32 scopeid = rpc_get_scope_id(sa);
3079 unsigned short expected_family;
3080
3081 /* Currently, we only support tcp and tcp6 for the callback channel */
3082 if (se->se_callback_netid_len == 3 &&
3083 !memcmp(se->se_callback_netid_val, "tcp", 3))
3084 expected_family = AF_INET;
3085 else if (se->se_callback_netid_len == 4 &&
3086 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3087 expected_family = AF_INET6;
3088 else
3089 goto out_err;
3090
3091 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3092 se->se_callback_addr_len,
3093 (struct sockaddr *)&conn->cb_addr,
3094 sizeof(conn->cb_addr));
3095
3096 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3097 goto out_err;
3098
3099 if (conn->cb_addr.ss_family == AF_INET6)
3100 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3101
3102 conn->cb_prog = se->se_callback_prog;
3103 conn->cb_ident = se->se_callback_ident;
3104 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3105 trace_nfsd_cb_args(clp, conn);
3106 return;
3107 out_err:
3108 conn->cb_addr.ss_family = AF_UNSPEC;
3109 conn->cb_addrlen = 0;
3110 trace_nfsd_cb_nodelegs(clp);
3111 return;
3112 }
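/*
 * For example (an assumed registration): netid "tcp" with universal
 * address "192.0.2.53.8.1" is parsed by rpc_uaddr2sockaddr() into the
 * IPv4 address 192.0.2.53 with port 8 * 256 + 1 = 2049.
 */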
3113
3114 /*
3115 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3116 */
3117 static void
3118 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3119 {
3120 struct xdr_buf *buf = resp->xdr->buf;
3121 struct nfsd4_slot *slot = resp->cstate.slot;
3122 unsigned int base;
3123
3124 dprintk("--> %s slot %p\n", __func__, slot);
3125
3126 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3127 slot->sl_opcnt = resp->opcnt;
3128 slot->sl_status = resp->cstate.status;
3129 free_svc_cred(&slot->sl_cred);
3130 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3131
3132 if (!nfsd4_cache_this(resp)) {
3133 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3134 return;
3135 }
3136 slot->sl_flags |= NFSD4_SLOT_CACHED;
3137
3138 base = resp->cstate.data_offset;
3139 slot->sl_datalen = buf->len - base;
3140 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3141 WARN(1, "%s: sessions DRC could not cache compound\n",
3142 __func__);
3143 return;
3144 }
3145
3146 /*
3147 * Encode the replay sequence operation from the slot values.
3148 * If cachethis is FALSE, encode the uncached-reply error on the next
3149 * operation, which sets resp->p and increments resp->opcnt for
3150 * nfs4svc_encode_compoundres.
3151 *
3152 */
3153 static __be32
3154 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3155 struct nfsd4_compoundres *resp)
3156 {
3157 struct nfsd4_op *op;
3158 struct nfsd4_slot *slot = resp->cstate.slot;
3159
3160 /* Encode the replayed sequence operation */
3161 op = &args->ops[resp->opcnt - 1];
3162 nfsd4_encode_operation(resp, op);
3163
3164 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3165 return op->status;
3166 if (args->opcnt == 1) {
3167 /*
3168 * The original operation wasn't a solo sequence--we
3169 * always cache those--so this retry must not match the
3170 * original:
3171 */
3172 op->status = nfserr_seq_false_retry;
3173 } else {
3174 op = &args->ops[resp->opcnt++];
3175 op->status = nfserr_retry_uncached_rep;
3176 nfsd4_encode_operation(resp, op);
3177 }
3178 return op->status;
3179 }
3180
3181 /*
3182 * The sequence operation is not cached because we can use the slot and
3183 * session values.
3184 */
3185 static __be32
3186 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3187 struct nfsd4_sequence *seq)
3188 {
3189 struct nfsd4_slot *slot = resp->cstate.slot;
3190 struct xdr_stream *xdr = resp->xdr;
3191 __be32 *p;
3192 __be32 status;
3193
3194 dprintk("--> %s slot %p\n", __func__, slot);
3195
3196 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3197 if (status)
3198 return status;
3199
3200 p = xdr_reserve_space(xdr, slot->sl_datalen);
3201 if (!p) {
3202 WARN_ON_ONCE(1);
3203 return nfserr_serverfault;
3204 }
3205 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3206 xdr_commit_encode(xdr);
3207
3208 resp->opcnt = slot->sl_opcnt;
3209 return slot->sl_status;
3210 }
3211
3212 /*
3213 * Set the exchange_id flags returned by the server.
3214 */
3215 static void
3216 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3217 {
3218 #ifdef CONFIG_NFSD_PNFS
3219 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3220 #else
3221 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3222 #endif
3223
3224 /* Referrals are supported, Migration is not. */
3225 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3226
3227 /* set the wire flags to return to client. */
3228 clid->flags = new->cl_exchange_flags;
3229 }
3230
3231 static bool client_has_openowners(struct nfs4_client *clp)
3232 {
3233 struct nfs4_openowner *oo;
3234
3235 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3236 if (!list_empty(&oo->oo_owner.so_stateids))
3237 return true;
3238 }
3239 return false;
3240 }
3241
3242 static bool client_has_state(struct nfs4_client *clp)
3243 {
3244 return client_has_openowners(clp)
3245 #ifdef CONFIG_NFSD_PNFS
3246 || !list_empty(&clp->cl_lo_states)
3247 #endif
3248 || !list_empty(&clp->cl_delegations)
3249 || !list_empty(&clp->cl_sessions)
3250 || !list_empty(&clp->async_copies);
3251 }
3252
3253 static __be32 copy_impl_id(struct nfs4_client *clp,
3254 struct nfsd4_exchange_id *exid)
3255 {
3256 if (!exid->nii_domain.data)
3257 return 0;
3258 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3259 if (!clp->cl_nii_domain.data)
3260 return nfserr_jukebox;
3261 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3262 if (!clp->cl_nii_name.data)
3263 return nfserr_jukebox;
3264 clp->cl_nii_time = exid->nii_time;
3265 return 0;
3266 }
3267
3268 __be32
3269 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3270 union nfsd4_op_u *u)
3271 {
3272 struct nfsd4_exchange_id *exid = &u->exchange_id;
3273 struct nfs4_client *conf, *new;
3274 struct nfs4_client *unconf = NULL;
3275 __be32 status;
3276 char addr_str[INET6_ADDRSTRLEN];
3277 nfs4_verifier verf = exid->verifier;
3278 struct sockaddr *sa = svc_addr(rqstp);
3279 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3280 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3281
3282 rpc_ntop(sa, addr_str, sizeof(addr_str));
3283 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3284 "ip_addr=%s flags %x, spa_how %u\n",
3285 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3286 addr_str, exid->flags, exid->spa_how);
3287
3288 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3289 return nfserr_inval;
3290
3291 new = create_client(exid->clname, rqstp, &verf);
3292 if (new == NULL)
3293 return nfserr_jukebox;
3294 status = copy_impl_id(new, exid);
3295 if (status)
3296 goto out_nolock;
3297
3298 switch (exid->spa_how) {
3299 case SP4_MACH_CRED:
3300 exid->spo_must_enforce[0] = 0;
3301 exid->spo_must_enforce[1] = (
3302 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3303 1 << (OP_EXCHANGE_ID - 32) |
3304 1 << (OP_CREATE_SESSION - 32) |
3305 1 << (OP_DESTROY_SESSION - 32) |
3306 1 << (OP_DESTROY_CLIENTID - 32));
3307
3308 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3309 1 << (OP_OPEN_DOWNGRADE) |
3310 1 << (OP_LOCKU) |
3311 1 << (OP_DELEGRETURN));
3312
3313 exid->spo_must_allow[1] &= (
3314 1 << (OP_TEST_STATEID - 32) |
3315 1 << (OP_FREE_STATEID - 32));
3316 if (!svc_rqst_integrity_protected(rqstp)) {
3317 status = nfserr_inval;
3318 goto out_nolock;
3319 }
3320 /*
3321 * Sometimes userspace doesn't give us a principal.
3322 * Which is a bug, really. Anyway, we can't enforce
3323 * MACH_CRED in that case, better to give up now:
3324 */
3325 if (!new->cl_cred.cr_principal &&
3326 !new->cl_cred.cr_raw_principal) {
3327 status = nfserr_serverfault;
3328 goto out_nolock;
3329 }
3330 new->cl_mach_cred = true;
3331 break;
3332 case SP4_NONE:
3333 break;
3334 default: /* checked by xdr code */
3335 WARN_ON_ONCE(1);
3336 fallthrough;
3337 case SP4_SSV:
3338 status = nfserr_encr_alg_unsupp;
3339 goto out_nolock;
3340 }
3341
3342 /* Cases below refer to rfc 5661 section 18.35.4: */
3343 spin_lock(&nn->client_lock);
3344 conf = find_confirmed_client_by_name(&exid->clname, nn);
3345 if (conf) {
3346 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3347 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3348
3349 if (update) {
3350 if (!clp_used_exchangeid(conf)) { /* buggy client */
3351 status = nfserr_inval;
3352 goto out;
3353 }
3354 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3355 status = nfserr_wrong_cred;
3356 goto out;
3357 }
3358 if (!creds_match) { /* case 9 */
3359 status = nfserr_perm;
3360 goto out;
3361 }
3362 if (!verfs_match) { /* case 8 */
3363 status = nfserr_not_same;
3364 goto out;
3365 }
3366 /* case 6 */
3367 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3368 trace_nfsd_clid_confirmed_r(conf);
3369 goto out_copy;
3370 }
3371 if (!creds_match) { /* case 3 */
3372 if (client_has_state(conf)) {
3373 status = nfserr_clid_inuse;
3374 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3375 goto out;
3376 }
3377 goto out_new;
3378 }
3379 if (verfs_match) { /* case 2 */
3380 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3381 trace_nfsd_clid_confirmed_r(conf);
3382 goto out_copy;
3383 }
3384 /* case 5, client reboot */
3385 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3386 conf = NULL;
3387 goto out_new;
3388 }
3389
3390 if (update) { /* case 7 */
3391 status = nfserr_noent;
3392 goto out;
3393 }
3394
3395 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3396 if (unconf) /* case 4, possible retry or client restart */
3397 unhash_client_locked(unconf);
3398
3399 /* case 1, new owner ID */
3400 trace_nfsd_clid_fresh(new);
3401
3402 out_new:
3403 if (conf) {
3404 status = mark_client_expired_locked(conf);
3405 if (status)
3406 goto out;
3407 trace_nfsd_clid_replaced(&conf->cl_clientid);
3408 }
3409 new->cl_minorversion = cstate->minorversion;
3410 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3411 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3412
3413 add_to_unconfirmed(new);
3414 swap(new, conf);
3415 out_copy:
3416 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3417 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3418
3419 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3420 nfsd4_set_ex_flags(conf, exid);
3421
3422 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3423 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3424 status = nfs_ok;
3425
3426 out:
3427 spin_unlock(&nn->client_lock);
3428 out_nolock:
3429 if (new)
3430 expire_client(new);
3431 if (unconf) {
3432 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3433 expire_client(unconf);
3434 }
3435 return status;
3436 }
3437
3438 static __be32
3439 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3440 {
3441 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3442 slot_seqid);
3443
3444 /* The slot is in use, and no response has been sent. */
3445 if (slot_inuse) {
3446 if (seqid == slot_seqid)
3447 return nfserr_jukebox;
3448 else
3449 return nfserr_seq_misordered;
3450 }
3451 /* Note unsigned 32-bit arithmetic handles wraparound: */
3452 if (likely(seqid == slot_seqid + 1))
3453 return nfs_ok;
3454 if (seqid == slot_seqid)
3455 return nfserr_replay_cache;
3456 return nfserr_seq_misordered;
3457 }
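/*
 * Summarizing the cases above for a slot that is not in use:
 *   seqid == slot_seqid + 1  ->  nfs_ok                 (new request)
 *   seqid == slot_seqid      ->  nfserr_replay_cache    (retransmission)
 *   anything else            ->  nfserr_seq_misordered
 * While the slot is still busy, a duplicate seqid instead gets
 * nfserr_jukebox, telling the client to retry later.
 */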
3458
3459 /*
3460 * Cache the CREATE_SESSION result in the client's single DRC slot
3461 * by saving the xdr structure. sl_seqid has already been set.
3462 * Do this for both solo and embedded create session operations.
3463 */
3464 static void
3465 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3466 struct nfsd4_clid_slot *slot, __be32 nfserr)
3467 {
3468 slot->sl_status = nfserr;
3469 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3470 }
3471
3472 static __be32
3473 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3474 struct nfsd4_clid_slot *slot)
3475 {
3476 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3477 return slot->sl_status;
3478 }
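/*
 * Unlike SEQUENCE replays, which use the per-session slot table,
 * CREATE_SESSION replay detection relies on this single per-client
 * slot (cl_cs_slot): the whole nfsd4_create_session structure is
 * memcpy'd in and out rather than re-encoded from cached XDR.
 */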
3479
3480 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3481 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3482 1 + /* minimal tag: a zero-length string, so just the length word */ \
3483 3 + /* version, opcount, opcode */ \
3484 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3485 /* seqid, slotID, slotID, cache */ \
3486 4 ) * sizeof(__be32))
3487
3488 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3489 2 + /* verifier: AUTH_NULL, length 0 */\
3490 1 + /* status */ \
3491 1 + /* minimal tag: a zero-length string, so just the length word */ \
3492 3 + /* opcount, opcode, opstatus*/ \
3493 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3494 /* seqid, slotID, slotID, slotID, status */ \
3495 5 ) * sizeof(__be32))
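/*
 * Worked example (added commentary, assuming NFS4_MAX_SESSIONID_LEN
 * is 16 as in current headers): XDR_QUADLEN(16) = 4, so both minima
 * above come to 16 XDR words * sizeof(__be32) = 64 bytes. A client
 * advertising a fore channel maxreq_sz or maxresp_sz smaller than
 * that could not even carry a bare SEQUENCE operation, hence the
 * nfserr_toosmall checks in check_forechannel_attrs() below.
 */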
3496
3497 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3498 {
3499 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3500
3501 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3502 return nfserr_toosmall;
3503 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3504 return nfserr_toosmall;
3505 ca->headerpadsz = 0;
3506 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3507 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3508 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3509 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3510 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3511 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3512 /*
3513 * Note that decreasing the slot size below the client's request may
3514 * make it difficult for the client to function correctly, whereas
3515 * decreasing the number of slots will (just?) affect
3516 * performance. When short on memory we therefore prefer to
3517 * decrease the number of slots rather than their size. Clients that
3518 * request larger slots than they need will get poor results.
3519 * Note that we always allow at least one slot, because our
3520 * accounting is soft and provides no guarantees either way.
3521 */
3522 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3523
3524 return nfs_ok;
3525 }
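/*
 * Sketch of the policy above (added commentary, with illustrative
 * numbers): nfsd4_get_drc_mem() may grant fewer slots than the
 * clamped ca->maxreqs, but it never shrinks the negotiated slot
 * size. A client asking for 64 slots of 32KB on a memory-constrained
 * server might be granted, say, 8 slots of 32KB rather than 64 slots
 * of 4KB: shrinking a slot below the client's request can break the
 * client, while granting fewer slots merely costs parallelism.
 */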
3526
3527 /*
3528 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3529 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3530 */
3531 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3532 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3533
3534 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3535 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3536
3537 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3538 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3539 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3540 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3541 sizeof(__be32))
3542
3543 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3544 {
3545 ca->headerpadsz = 0;
3546
3547 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3548 return nfserr_toosmall;
3549 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3550 return nfserr_toosmall;
3551 ca->maxresp_cached = 0;
3552 if (ca->maxops < 2)
3553 return nfserr_toosmall;
3554
3555 return nfs_ok;
3556 }
3557
3558 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3559 {
3560 switch (cbs->flavor) {
3561 case RPC_AUTH_NULL:
3562 case RPC_AUTH_UNIX:
3563 return nfs_ok;
3564 default:
3565 /*
3566 * GSS case: the spec doesn't allow us to return this
3567 * error. But it also doesn't allow us not to support
3568 * GSS.
3569 * I'd rather this fail hard than return some error the
3570 * client might think it can already handle:
3571 */
3572 return nfserr_encr_alg_unsupp;
3573 }
3574 }
3575
3576 __be32
3577 nfsd4_create_session(struct svc_rqst *rqstp,
3578 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3579 {
3580 struct nfsd4_create_session *cr_ses = &u->create_session;
3581 struct sockaddr *sa = svc_addr(rqstp);
3582 struct nfs4_client *conf, *unconf;
3583 struct nfs4_client *old = NULL;
3584 struct nfsd4_session *new;
3585 struct nfsd4_conn *conn;
3586 struct nfsd4_clid_slot *cs_slot = NULL;
3587 __be32 status = 0;
3588 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3589
3590 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3591 return nfserr_inval;
3592 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3593 if (status)
3594 return status;
3595 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3596 if (status)
3597 return status;
3598 status = check_backchannel_attrs(&cr_ses->back_channel);
3599 if (status)
3600 goto out_release_drc_mem;
3601 status = nfserr_jukebox;
3602 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3603 if (!new)
3604 goto out_release_drc_mem;
3605 conn = alloc_conn_from_crses(rqstp, cr_ses);
3606 if (!conn)
3607 goto out_free_session;
3608
3609 spin_lock(&nn->client_lock);
3610 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3611 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3612 WARN_ON_ONCE(conf && unconf);
3613
3614 if (conf) {
3615 status = nfserr_wrong_cred;
3616 if (!nfsd4_mach_creds_match(conf, rqstp))
3617 goto out_free_conn;
3618 cs_slot = &conf->cl_cs_slot;
3619 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3620 if (status) {
3621 if (status == nfserr_replay_cache)
3622 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3623 goto out_free_conn;
3624 }
3625 } else if (unconf) {
3626 status = nfserr_clid_inuse;
3627 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3628 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3629 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3630 goto out_free_conn;
3631 }
3632 status = nfserr_wrong_cred;
3633 if (!nfsd4_mach_creds_match(unconf, rqstp))
3634 goto out_free_conn;
3635 cs_slot = &unconf->cl_cs_slot;
3636 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3637 if (status) {
3638 /* an unconfirmed replay returns misordered */
3639 status = nfserr_seq_misordered;
3640 goto out_free_conn;
3641 }
3642 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3643 if (old) {
3644 status = mark_client_expired_locked(old);
3645 if (status) {
3646 old = NULL;
3647 goto out_free_conn;
3648 }
3649 trace_nfsd_clid_replaced(&old->cl_clientid);
3650 }
3651 move_to_confirmed(unconf);
3652 conf = unconf;
3653 } else {
3654 status = nfserr_stale_clientid;
3655 goto out_free_conn;
3656 }
3657 status = nfs_ok;
3658 /* Persistent sessions are not supported */
3659 cr_ses->flags &= ~SESSION4_PERSIST;
3660 /* Upshifting from TCP to RDMA is not supported */
3661 cr_ses->flags &= ~SESSION4_RDMA;
3662
3663 init_session(rqstp, new, conf, cr_ses);
3664 nfsd4_get_session_locked(new);
3665
3666 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3667 NFS4_MAX_SESSIONID_LEN);
3668 cs_slot->sl_seqid++;
3669 cr_ses->seqid = cs_slot->sl_seqid;
3670
3671 /* cache solo and embedded create sessions under the client_lock */
3672 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3673 spin_unlock(&nn->client_lock);
3674 if (conf == unconf)
3675 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3676 /* init connection and backchannel */
3677 nfsd4_init_conn(rqstp, conn, new);
3678 nfsd4_put_session(new);
3679 if (old)
3680 expire_client(old);
3681 return status;
3682 out_free_conn:
3683 spin_unlock(&nn->client_lock);
3684 free_conn(conn);
3685 if (old)
3686 expire_client(old);
3687 out_free_session:
3688 __free_session(new);
3689 out_release_drc_mem:
3690 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3691 return status;
3692 }
3693
3694 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3695 {
3696 switch (*dir) {
3697 case NFS4_CDFC4_FORE:
3698 case NFS4_CDFC4_BACK:
3699 return nfs_ok;
3700 case NFS4_CDFC4_FORE_OR_BOTH:
3701 case NFS4_CDFC4_BACK_OR_BOTH:
3702 *dir = NFS4_CDFC4_BOTH;
3703 return nfs_ok;
3704 }
3705 return nfserr_inval;
3706 }
3707
3708 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3709 struct nfsd4_compound_state *cstate,
3710 union nfsd4_op_u *u)
3711 {
3712 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3713 struct nfsd4_session *session = cstate->session;
3714 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3715 __be32 status;
3716
3717 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3718 if (status)
3719 return status;
3720 spin_lock(&nn->client_lock);
3721 session->se_cb_prog = bc->bc_cb_program;
3722 session->se_cb_sec = bc->bc_cb_sec;
3723 spin_unlock(&nn->client_lock);
3724
3725 nfsd4_probe_callback(session->se_client);
3726
3727 return nfs_ok;
3728 }
3729
3730 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3731 {
3732 struct nfsd4_conn *c;
3733
3734 list_for_each_entry(c, &s->se_conns, cn_persession) {
3735 if (c->cn_xprt == xpt) {
3736 return c;
3737 }
3738 }
3739 return NULL;
3740 }
3741
3742 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3743 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3744 {
3745 struct nfs4_client *clp = session->se_client;
3746 struct svc_xprt *xpt = rqst->rq_xprt;
3747 struct nfsd4_conn *c;
3748 __be32 status;
3749
3750 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
3751 spin_lock(&clp->cl_lock);
3752 c = __nfsd4_find_conn(xpt, session);
3753 if (!c)
3754 status = nfserr_noent;
3755 else if (req == c->cn_flags)
3756 status = nfs_ok;
3757 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3758 c->cn_flags != NFS4_CDFC4_BACK)
3759 status = nfs_ok;
3760 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3761 c->cn_flags != NFS4_CDFC4_FORE)
3762 status = nfs_ok;
3763 else
3764 status = nfserr_inval;
3765 spin_unlock(&clp->cl_lock);
3766 if (status == nfs_ok && conn)
3767 *conn = c;
3768 return status;
3769 }
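/*
 * Illustrative summary of the checks above (added commentary), where
 * "existing" is how the found connection is already bound
 * (c->cn_flags) and "req" is what BIND_CONN_TO_SESSION requested:
 *
 *	req			existing	result
 *	---------------------	------------	--------------
 *	exact match		(same value)	nfs_ok
 *	FORE_OR_BOTH		FORE or BOTH	nfs_ok
 *	BACK_OR_BOTH		BACK or BOTH	nfs_ok
 *	any other combination	-		nfserr_inval
 *	no connection found	-		nfserr_noent
 */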
3770
3771 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3772 struct nfsd4_compound_state *cstate,
3773 union nfsd4_op_u *u)
3774 {
3775 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3776 __be32 status;
3777 struct nfsd4_conn *conn;
3778 struct nfsd4_session *session;
3779 struct net *net = SVC_NET(rqstp);
3780 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3781
3782 if (!nfsd4_last_compound_op(rqstp))
3783 return nfserr_not_only_op;
3784 spin_lock(&nn->client_lock);
3785 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3786 spin_unlock(&nn->client_lock);
3787 if (!session)
3788 goto out_no_session;
3789 status = nfserr_wrong_cred;
3790 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3791 goto out;
3792 status = nfsd4_match_existing_connection(rqstp, session,
3793 bcts->dir, &conn);
3794 if (status == nfs_ok) {
3795 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3796 bcts->dir == NFS4_CDFC4_BACK)
3797 conn->cn_flags |= NFS4_CDFC4_BACK;
3798 nfsd4_probe_callback(session->se_client);
3799 goto out;
3800 }
3801 if (status == nfserr_inval)
3802 goto out;
3803 status = nfsd4_map_bcts_dir(&bcts->dir);
3804 if (status)
3805 goto out;
3806 conn = alloc_conn(rqstp, bcts->dir);
3807 status = nfserr_jukebox;
3808 if (!conn)
3809 goto out;
3810 nfsd4_init_conn(rqstp, conn, session);
3811 status = nfs_ok;
3812 out:
3813 nfsd4_put_session(session);
3814 out_no_session:
3815 return status;
3816 }
3817
3818 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3819 {
3820 if (!cstate->session)
3821 return false;
3822 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3823 }
3824
3825 __be32
3826 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3827 union nfsd4_op_u *u)
3828 {
3829 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3830 struct nfsd4_session *ses;
3831 __be32 status;
3832 int ref_held_by_me = 0;
3833 struct net *net = SVC_NET(r);
3834 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3835
3836 status = nfserr_not_only_op;
3837 if (nfsd4_compound_in_session(cstate, sessionid)) {
3838 if (!nfsd4_last_compound_op(r))
3839 goto out;
3840 ref_held_by_me++;
3841 }
3842 dump_sessionid(__func__, sessionid);
3843 spin_lock(&nn->client_lock);
3844 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3845 if (!ses)
3846 goto out_client_lock;
3847 status = nfserr_wrong_cred;
3848 if (!nfsd4_mach_creds_match(ses->se_client, r))
3849 goto out_put_session;
3850 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3851 if (status)
3852 goto out_put_session;
3853 unhash_session(ses);
3854 spin_unlock(&nn->client_lock);
3855
3856 nfsd4_probe_callback_sync(ses->se_client);
3857
3858 spin_lock(&nn->client_lock);
3859 status = nfs_ok;
3860 out_put_session:
3861 nfsd4_put_session_locked(ses);
3862 out_client_lock:
3863 spin_unlock(&nn->client_lock);
3864 out:
3865 return status;
3866 }
3867
3868 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3869 {
3870 struct nfs4_client *clp = ses->se_client;
3871 struct nfsd4_conn *c;
3872 __be32 status = nfs_ok;
3873 int ret;
3874
3875 spin_lock(&clp->cl_lock);
3876 c = __nfsd4_find_conn(new->cn_xprt, ses);
3877 if (c)
3878 goto out_free;
3879 status = nfserr_conn_not_bound_to_session;
3880 if (clp->cl_mach_cred)
3881 goto out_free;
3882 __nfsd4_hash_conn(new, ses);
3883 spin_unlock(&clp->cl_lock);
3884 ret = nfsd4_register_conn(new);
3885 if (ret)
3886 /* oops; xprt is already down: */
3887 nfsd4_conn_lost(&new->cn_xpt_user);
3888 return nfs_ok;
3889 out_free:
3890 spin_unlock(&clp->cl_lock);
3891 free_conn(new);
3892 return status;
3893 }
3894
3895 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3896 {
3897 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3898
3899 return args->opcnt > session->se_fchannel.maxops;
3900 }
3901
3902 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3903 struct nfsd4_session *session)
3904 {
3905 struct xdr_buf *xb = &rqstp->rq_arg;
3906
3907 return xb->len > session->se_fchannel.maxreq_sz;
3908 }
3909
3910 static bool replay_matches_cache(struct svc_rqst *rqstp,
3911 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3912 {
3913 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3914
3915 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3916 (bool)seq->cachethis)
3917 return false;
3918 /*
3919 * If there's an error then the reply can have fewer ops than
3920 * the call.
3921 */
3922 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3923 return false;
3924 /*
3925 * But if we cached a reply with *more* ops than the call you're
3926 * sending us now, then this new call is clearly not really a
3927 * replay of the old one:
3928 */
3929 if (slot->sl_opcnt > argp->opcnt)
3930 return false;
3931 /* This is the only check the spec explicitly requires: */
3932 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3933 return false;
3934 /*
3935 * There may be more comparisons we could actually do, but the
3936 * spec doesn't require us to catch every case where the calls
3937 * don't match (that would require caching the call as well as
3938 * the reply), so we don't bother.
3939 */
3940 return true;
3941 }
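/*
 * Example of the false-retry detection above (added commentary):
 * suppose the server cached a successful 3-op reply (SEQUENCE, PUTFH,
 * READ) for this slot and seqid. If a "retransmission" with the same
 * seqid now arrives carrying only two ops, or different credentials,
 * it cannot be a replay of the call that produced the cached reply,
 * and nfsd4_sequence() returns nfserr_seq_false_retry rather than
 * stale cached data.
 */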
3942
3943 __be32
3944 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3945 union nfsd4_op_u *u)
3946 {
3947 struct nfsd4_sequence *seq = &u->sequence;
3948 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3949 struct xdr_stream *xdr = resp->xdr;
3950 struct nfsd4_session *session;
3951 struct nfs4_client *clp;
3952 struct nfsd4_slot *slot;
3953 struct nfsd4_conn *conn;
3954 __be32 status;
3955 int buflen;
3956 struct net *net = SVC_NET(rqstp);
3957 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3958
3959 if (resp->opcnt != 1)
3960 return nfserr_sequence_pos;
3961
3962 /*
3963 * Will be either used or freed by nfsd4_sequence_check_conn
3964 * below.
3965 */
3966 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3967 if (!conn)
3968 return nfserr_jukebox;
3969
3970 spin_lock(&nn->client_lock);
3971 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3972 if (!session)
3973 goto out_no_session;
3974 clp = session->se_client;
3975
3976 status = nfserr_too_many_ops;
3977 if (nfsd4_session_too_many_ops(rqstp, session))
3978 goto out_put_session;
3979
3980 status = nfserr_req_too_big;
3981 if (nfsd4_request_too_big(rqstp, session))
3982 goto out_put_session;
3983
3984 status = nfserr_badslot;
3985 if (seq->slotid >= session->se_fchannel.maxreqs)
3986 goto out_put_session;
3987
3988 slot = session->se_slots[seq->slotid];
3989 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3990
3991 /* We do not negotiate the number of slots yet, so set the
3992 * maxslots to the session maxreqs, which is used to encode both
3993 * sr_highest_slotid and sr_target_slotid */
3994 seq->maxslots = session->se_fchannel.maxreqs;
3995
3996 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3997 slot->sl_flags & NFSD4_SLOT_INUSE);
3998 if (status == nfserr_replay_cache) {
3999 status = nfserr_seq_misordered;
4000 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
4001 goto out_put_session;
4002 status = nfserr_seq_false_retry;
4003 if (!replay_matches_cache(rqstp, seq, slot))
4004 goto out_put_session;
4005 cstate->slot = slot;
4006 cstate->session = session;
4007 cstate->clp = clp;
4008 /* Return the cached reply status and set cstate->status
4009 * for nfsd4_proc_compound processing */
4010 status = nfsd4_replay_cache_entry(resp, seq);
4011 cstate->status = nfserr_replay_cache;
4012 goto out;
4013 }
4014 if (status)
4015 goto out_put_session;
4016
4017 status = nfsd4_sequence_check_conn(conn, session);
4018 conn = NULL;
4019 if (status)
4020 goto out_put_session;
4021
4022 buflen = (seq->cachethis) ?
4023 session->se_fchannel.maxresp_cached :
4024 session->se_fchannel.maxresp_sz;
4025 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
4026 nfserr_rep_too_big;
4027 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
4028 goto out_put_session;
4029 svc_reserve(rqstp, buflen);
4030
4031 status = nfs_ok;
4032 /* Success! bump slot seqid */
4033 slot->sl_seqid = seq->seqid;
4034 slot->sl_flags |= NFSD4_SLOT_INUSE;
4035 if (seq->cachethis)
4036 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4037 else
4038 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4039
4040 cstate->slot = slot;
4041 cstate->session = session;
4042 cstate->clp = clp;
4043
4044 out:
4045 switch (clp->cl_cb_state) {
4046 case NFSD4_CB_DOWN:
4047 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4048 break;
4049 case NFSD4_CB_FAULT:
4050 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4051 break;
4052 default:
4053 seq->status_flags = 0;
4054 }
4055 if (!list_empty(&clp->cl_revoked))
4056 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4057 out_no_session:
4058 if (conn)
4059 free_conn(conn);
4060 spin_unlock(&nn->client_lock);
4061 return status;
4062 out_put_session:
4063 nfsd4_put_session_locked(session);
4064 goto out_no_session;
4065 }
4066
4067 void
4068 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4069 {
4070 struct nfsd4_compound_state *cs = &resp->cstate;
4071
4072 if (nfsd4_has_session(cs)) {
4073 if (cs->status != nfserr_replay_cache) {
4074 nfsd4_store_cache_entry(resp);
4075 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4076 }
4077 /* Drop session reference that was taken in nfsd4_sequence() */
4078 nfsd4_put_session(cs->session);
4079 } else if (cs->clp)
4080 put_client_renew(cs->clp);
4081 }
4082
4083 __be32
4084 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4085 struct nfsd4_compound_state *cstate,
4086 union nfsd4_op_u *u)
4087 {
4088 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4089 struct nfs4_client *conf, *unconf;
4090 struct nfs4_client *clp = NULL;
4091 __be32 status = 0;
4092 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4093
4094 spin_lock(&nn->client_lock);
4095 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4096 conf = find_confirmed_client(&dc->clientid, true, nn);
4097 WARN_ON_ONCE(conf && unconf);
4098
4099 if (conf) {
4100 if (client_has_state(conf)) {
4101 status = nfserr_clientid_busy;
4102 goto out;
4103 }
4104 status = mark_client_expired_locked(conf);
4105 if (status)
4106 goto out;
4107 clp = conf;
4108 } else if (unconf)
4109 clp = unconf;
4110 else {
4111 status = nfserr_stale_clientid;
4112 goto out;
4113 }
4114 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4115 clp = NULL;
4116 status = nfserr_wrong_cred;
4117 goto out;
4118 }
4119 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4120 unhash_client_locked(clp);
4121 out:
4122 spin_unlock(&nn->client_lock);
4123 if (clp)
4124 expire_client(clp);
4125 return status;
4126 }
4127
4128 __be32
4129 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4130 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4131 {
4132 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4133 struct nfs4_client *clp = cstate->clp;
4134 __be32 status = 0;
4135
4136 if (rc->rca_one_fs) {
4137 if (!cstate->current_fh.fh_dentry)
4138 return nfserr_nofilehandle;
4139 /*
4140 * We don't take advantage of the rca_one_fs case.
4141 * That's OK, it's optional, we can safely ignore it.
4142 */
4143 return nfs_ok;
4144 }
4145
4146 status = nfserr_complete_already;
4147 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4148 goto out;
4149
4150 status = nfserr_stale_clientid;
4151 if (is_client_expired(clp))
4152 /*
4153 * The following error isn't really legal.
4154 * But we only get here if the client just explicitly
4155 * destroyed itself. Surely it no longer cares what
4156 * error it gets back on an operation for the dead
4157 * client.
4158 */
4159 goto out;
4160
4161 status = nfs_ok;
4162 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4163 nfsd4_client_record_create(clp);
4164 inc_reclaim_complete(clp);
4165 out:
4166 return status;
4167 }
4168
4169 __be32
4170 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4171 union nfsd4_op_u *u)
4172 {
4173 struct nfsd4_setclientid *setclid = &u->setclientid;
4174 struct xdr_netobj clname = setclid->se_name;
4175 nfs4_verifier clverifier = setclid->se_verf;
4176 struct nfs4_client *conf, *new;
4177 struct nfs4_client *unconf = NULL;
4178 __be32 status;
4179 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4180
4181 new = create_client(clname, rqstp, &clverifier);
4182 if (new == NULL)
4183 return nfserr_jukebox;
4184 spin_lock(&nn->client_lock);
4185 conf = find_confirmed_client_by_name(&clname, nn);
4186 if (conf && client_has_state(conf)) {
4187 status = nfserr_clid_inuse;
4188 if (clp_used_exchangeid(conf))
4189 goto out;
4190 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4191 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4192 goto out;
4193 }
4194 }
4195 unconf = find_unconfirmed_client_by_name(&clname, nn);
4196 if (unconf)
4197 unhash_client_locked(unconf);
4198 if (conf) {
4199 if (same_verf(&conf->cl_verifier, &clverifier)) {
4200 copy_clid(new, conf);
4201 gen_confirm(new, nn);
4202 } else
4203 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4204 &clverifier);
4205 } else
4206 trace_nfsd_clid_fresh(new);
4207 new->cl_minorversion = 0;
4208 gen_callback(new, setclid, rqstp);
4209 add_to_unconfirmed(new);
4210 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4211 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4212 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4213 new = NULL;
4214 status = nfs_ok;
4215 out:
4216 spin_unlock(&nn->client_lock);
4217 if (new)
4218 free_client(new);
4219 if (unconf) {
4220 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4221 expire_client(unconf);
4222 }
4223 return status;
4224 }
4225
4226 __be32
4227 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4228 struct nfsd4_compound_state *cstate,
4229 union nfsd4_op_u *u)
4230 {
4231 struct nfsd4_setclientid_confirm *setclientid_confirm =
4232 &u->setclientid_confirm;
4233 struct nfs4_client *conf, *unconf;
4234 struct nfs4_client *old = NULL;
4235 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4236 clientid_t * clid = &setclientid_confirm->sc_clientid;
4237 __be32 status;
4238 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4239
4240 if (STALE_CLIENTID(clid, nn))
4241 return nfserr_stale_clientid;
4242
4243 spin_lock(&nn->client_lock);
4244 conf = find_confirmed_client(clid, false, nn);
4245 unconf = find_unconfirmed_client(clid, false, nn);
4246 /*
4247 * We try hard to give out unique clientids, so if we get an
4248 * attempt to confirm the same clientid with a different cred,
4249 * the client may be buggy; this should never happen.
4250 *
4251 * Nevertheless, RFC 7530 recommends INUSE for this case:
4252 */
4253 status = nfserr_clid_inuse;
4254 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4255 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4256 goto out;
4257 }
4258 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4259 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4260 goto out;
4261 }
4262 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4263 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4264 status = nfs_ok;
4265 } else
4266 status = nfserr_stale_clientid;
4267 goto out;
4268 }
4269 status = nfs_ok;
4270 if (conf) {
4271 old = unconf;
4272 unhash_client_locked(old);
4273 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4274 } else {
4275 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4276 if (old) {
4277 status = nfserr_clid_inuse;
4278 if (client_has_state(old)
4279 && !same_creds(&unconf->cl_cred,
4280 &old->cl_cred)) {
4281 old = NULL;
4282 goto out;
4283 }
4284 status = mark_client_expired_locked(old);
4285 if (status) {
4286 old = NULL;
4287 goto out;
4288 }
4289 trace_nfsd_clid_replaced(&old->cl_clientid);
4290 }
4291 move_to_confirmed(unconf);
4292 conf = unconf;
4293 }
4294 get_client_locked(conf);
4295 spin_unlock(&nn->client_lock);
4296 if (conf == unconf)
4297 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4298 nfsd4_probe_callback(conf);
4299 spin_lock(&nn->client_lock);
4300 put_client_renew_locked(conf);
4301 out:
4302 spin_unlock(&nn->client_lock);
4303 if (old)
4304 expire_client(old);
4305 return status;
4306 }
4307
4308 static struct nfs4_file *nfsd4_alloc_file(void)
4309 {
4310 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4311 }
4312
4313 /* OPEN Share state helper functions */
4314
4315 static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
4316 {
4317 refcount_set(&fp->fi_ref, 1);
4318 spin_lock_init(&fp->fi_lock);
4319 INIT_LIST_HEAD(&fp->fi_stateids);
4320 INIT_LIST_HEAD(&fp->fi_delegations);
4321 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4322 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4323 fp->fi_deleg_file = NULL;
4324 fp->fi_had_conflict = false;
4325 fp->fi_share_deny = 0;
4326 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4327 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4328 fp->fi_aliased = false;
4329 fp->fi_inode = d_inode(fh->fh_dentry);
4330 #ifdef CONFIG_NFSD_PNFS
4331 INIT_LIST_HEAD(&fp->fi_lo_states);
4332 atomic_set(&fp->fi_lo_recalls, 0);
4333 #endif
4334 }
4335
4336 void
4337 nfsd4_free_slabs(void)
4338 {
4339 kmem_cache_destroy(client_slab);
4340 kmem_cache_destroy(openowner_slab);
4341 kmem_cache_destroy(lockowner_slab);
4342 kmem_cache_destroy(file_slab);
4343 kmem_cache_destroy(stateid_slab);
4344 kmem_cache_destroy(deleg_slab);
4345 kmem_cache_destroy(odstate_slab);
4346 }
4347
4348 int
4349 nfsd4_init_slabs(void)
4350 {
4351 client_slab = kmem_cache_create("nfsd4_clients",
4352 sizeof(struct nfs4_client), 0, 0, NULL);
4353 if (client_slab == NULL)
4354 goto out;
4355 openowner_slab = kmem_cache_create("nfsd4_openowners",
4356 sizeof(struct nfs4_openowner), 0, 0, NULL);
4357 if (openowner_slab == NULL)
4358 goto out_free_client_slab;
4359 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4360 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4361 if (lockowner_slab == NULL)
4362 goto out_free_openowner_slab;
4363 file_slab = kmem_cache_create("nfsd4_files",
4364 sizeof(struct nfs4_file), 0, 0, NULL);
4365 if (file_slab == NULL)
4366 goto out_free_lockowner_slab;
4367 stateid_slab = kmem_cache_create("nfsd4_stateids",
4368 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4369 if (stateid_slab == NULL)
4370 goto out_free_file_slab;
4371 deleg_slab = kmem_cache_create("nfsd4_delegations",
4372 sizeof(struct nfs4_delegation), 0, 0, NULL);
4373 if (deleg_slab == NULL)
4374 goto out_free_stateid_slab;
4375 odstate_slab = kmem_cache_create("nfsd4_odstate",
4376 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4377 if (odstate_slab == NULL)
4378 goto out_free_deleg_slab;
4379 return 0;
4380
4381 out_free_deleg_slab:
4382 kmem_cache_destroy(deleg_slab);
4383 out_free_stateid_slab:
4384 kmem_cache_destroy(stateid_slab);
4385 out_free_file_slab:
4386 kmem_cache_destroy(file_slab);
4387 out_free_lockowner_slab:
4388 kmem_cache_destroy(lockowner_slab);
4389 out_free_openowner_slab:
4390 kmem_cache_destroy(openowner_slab);
4391 out_free_client_slab:
4392 kmem_cache_destroy(client_slab);
4393 out:
4394 return -ENOMEM;
4395 }
4396
4397 static unsigned long
4398 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
4399 {
4400 int count;
4401 struct nfsd_net *nn = container_of(shrink,
4402 struct nfsd_net, nfsd_client_shrinker);
4403
4404 count = atomic_read(&nn->nfsd_courtesy_clients);
4405 if (!count)
4406 count = atomic_long_read(&num_delegations);
4407 if (count)
4408 queue_work(laundry_wq, &nn->nfsd_shrinker_work);
4409 return (unsigned long)count;
4410 }
4411
4412 static unsigned long
4413 nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
4414 {
4415 return SHRINK_STOP;
4416 }
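/*
 * Added commentary on the shrinker pair above: actual state reaping
 * cannot safely run in shrinker context (it takes locks and can
 * block), so ->count_objects only queues nfsd_shrinker_work on
 * laundry_wq and ->scan_objects reports SHRINK_STOP; the deferred
 * work then expires courtesy clients and delegations asynchronously.
 */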
4417
4418 void
4419 nfsd4_init_leases_net(struct nfsd_net *nn)
4420 {
4421 struct sysinfo si;
4422 u64 max_clients;
4423
4424 nn->nfsd4_lease = 90; /* default lease time */
4425 nn->nfsd4_grace = 90;
4426 nn->somebody_reclaimed = false;
4427 nn->track_reclaim_completes = false;
4428 nn->clverifier_counter = get_random_u32();
4429 nn->clientid_base = get_random_u32();
4430 nn->clientid_counter = nn->clientid_base + 1;
4431 nn->s2s_cp_cl_id = nn->clientid_counter++;
4432
4433 atomic_set(&nn->nfs4_client_count, 0);
4434 si_meminfo(&si);
4435 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4436 max_clients *= NFS4_CLIENTS_PER_GB;
4437 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4438
4439 atomic_set(&nn->nfsd_courtesy_clients, 0);
4440 }
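/*
 * Worked example for the client limit above (added commentary): on a
 * host with 16GB of RAM, nfs4_max_clients becomes
 * 16 * NFS4_CLIENTS_PER_GB; the max_t() floor ensures that a host
 * with less than 1GB still admits NFS4_CLIENTS_PER_GB clients rather
 * than rounding down to zero.
 */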
4441
4442 static void init_nfs4_replay(struct nfs4_replay *rp)
4443 {
4444 rp->rp_status = nfserr_serverfault;
4445 rp->rp_buflen = 0;
4446 rp->rp_buf = rp->rp_ibuf;
4447 mutex_init(&rp->rp_mutex);
4448 }
4449
4450 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4451 struct nfs4_stateowner *so)
4452 {
4453 if (!nfsd4_has_session(cstate)) {
4454 mutex_lock(&so->so_replay.rp_mutex);
4455 cstate->replay_owner = nfs4_get_stateowner(so);
4456 }
4457 }
4458
4459 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4460 {
4461 struct nfs4_stateowner *so = cstate->replay_owner;
4462
4463 if (so != NULL) {
4464 cstate->replay_owner = NULL;
4465 mutex_unlock(&so->so_replay.rp_mutex);
4466 nfs4_put_stateowner(so);
4467 }
4468 }
4469
4470 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4471 {
4472 struct nfs4_stateowner *sop;
4473
4474 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4475 if (!sop)
4476 return NULL;
4477
4478 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4479 if (!sop->so_owner.data) {
4480 kmem_cache_free(slab, sop);
4481 return NULL;
4482 }
4483
4484 INIT_LIST_HEAD(&sop->so_stateids);
4485 sop->so_client = clp;
4486 init_nfs4_replay(&sop->so_replay);
4487 atomic_set(&sop->so_count, 1);
4488 return sop;
4489 }
4490
4491 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4492 {
4493 lockdep_assert_held(&clp->cl_lock);
4494
4495 list_add(&oo->oo_owner.so_strhash,
4496 &clp->cl_ownerstr_hashtbl[strhashval]);
4497 list_add(&oo->oo_perclient, &clp->cl_openowners);
4498 }
4499
4500 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4501 {
4502 unhash_openowner_locked(openowner(so));
4503 }
4504
4505 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4506 {
4507 struct nfs4_openowner *oo = openowner(so);
4508
4509 kmem_cache_free(openowner_slab, oo);
4510 }
4511
4512 static const struct nfs4_stateowner_operations openowner_ops = {
4513 .so_unhash = nfs4_unhash_openowner,
4514 .so_free = nfs4_free_openowner,
4515 };
4516
4517 static struct nfs4_ol_stateid *
4518 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4519 {
4520 struct nfs4_ol_stateid *local, *ret = NULL;
4521 struct nfs4_openowner *oo = open->op_openowner;
4522
4523 lockdep_assert_held(&fp->fi_lock);
4524
4525 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4526 /* ignore lock owners */
4527 if (local->st_stateowner->so_is_open_owner == 0)
4528 continue;
4529 if (local->st_stateowner != &oo->oo_owner)
4530 continue;
4531 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4532 ret = local;
4533 refcount_inc(&ret->st_stid.sc_count);
4534 break;
4535 }
4536 }
4537 return ret;
4538 }
4539
4540 static __be32
4541 nfsd4_verify_open_stid(struct nfs4_stid *s)
4542 {
4543 __be32 ret = nfs_ok;
4544
4545 switch (s->sc_type) {
4546 default:
4547 break;
4548 case 0:
4549 case NFS4_CLOSED_STID:
4550 case NFS4_CLOSED_DELEG_STID:
4551 ret = nfserr_bad_stateid;
4552 break;
4553 case NFS4_REVOKED_DELEG_STID:
4554 ret = nfserr_deleg_revoked;
4555 }
4556 return ret;
4557 }
4558
4559 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4560 static __be32
4561 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4562 {
4563 __be32 ret;
4564
4565 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4566 ret = nfsd4_verify_open_stid(&stp->st_stid);
4567 if (ret != nfs_ok)
4568 mutex_unlock(&stp->st_mutex);
4569 return ret;
4570 }
4571
4572 static struct nfs4_ol_stateid *
4573 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4574 {
4575 struct nfs4_ol_stateid *stp;
4576 for (;;) {
4577 spin_lock(&fp->fi_lock);
4578 stp = nfsd4_find_existing_open(fp, open);
4579 spin_unlock(&fp->fi_lock);
4580 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4581 break;
4582 nfs4_put_stid(&stp->st_stid);
4583 }
4584 return stp;
4585 }
4586
4587 static struct nfs4_openowner *
4588 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4589 struct nfsd4_compound_state *cstate)
4590 {
4591 struct nfs4_client *clp = cstate->clp;
4592 struct nfs4_openowner *oo, *ret;
4593
4594 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4595 if (!oo)
4596 return NULL;
4597 oo->oo_owner.so_ops = &openowner_ops;
4598 oo->oo_owner.so_is_open_owner = 1;
4599 oo->oo_owner.so_seqid = open->op_seqid;
4600 oo->oo_flags = 0;
4601 if (nfsd4_has_session(cstate))
4602 oo->oo_flags |= NFS4_OO_CONFIRMED;
4603 oo->oo_time = 0;
4604 oo->oo_last_closed_stid = NULL;
4605 INIT_LIST_HEAD(&oo->oo_close_lru);
4606 spin_lock(&clp->cl_lock);
4607 ret = find_openstateowner_str_locked(strhashval, open, clp);
4608 if (ret == NULL) {
4609 hash_openowner(oo, clp, strhashval);
4610 ret = oo;
4611 } else
4612 nfs4_free_stateowner(&oo->oo_owner);
4613
4614 spin_unlock(&clp->cl_lock);
4615 return ret;
4616 }
4617
4618 static struct nfs4_ol_stateid *
4619 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4620 {
4621
4622 struct nfs4_openowner *oo = open->op_openowner;
4623 struct nfs4_ol_stateid *retstp = NULL;
4624 struct nfs4_ol_stateid *stp;
4625
4626 stp = open->op_stp;
4627 /* Initialize and take st_mutex outside the spinlocks; mutex_lock() can sleep */
4628 mutex_init(&stp->st_mutex);
4629 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4630
4631 retry:
4632 spin_lock(&oo->oo_owner.so_client->cl_lock);
4633 spin_lock(&fp->fi_lock);
4634
4635 retstp = nfsd4_find_existing_open(fp, open);
4636 if (retstp)
4637 goto out_unlock;
4638
4639 open->op_stp = NULL;
4640 refcount_inc(&stp->st_stid.sc_count);
4641 stp->st_stid.sc_type = NFS4_OPEN_STID;
4642 INIT_LIST_HEAD(&stp->st_locks);
4643 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4644 get_nfs4_file(fp);
4645 stp->st_stid.sc_file = fp;
4646 stp->st_access_bmap = 0;
4647 stp->st_deny_bmap = 0;
4648 stp->st_openstp = NULL;
4649 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4650 list_add(&stp->st_perfile, &fp->fi_stateids);
4651
4652 out_unlock:
4653 spin_unlock(&fp->fi_lock);
4654 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4655 if (retstp) {
4656 /* Handle races with CLOSE */
4657 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4658 nfs4_put_stid(&retstp->st_stid);
4659 goto retry;
4660 }
4661 /* To keep mutex tracking happy */
4662 mutex_unlock(&stp->st_mutex);
4663 stp = retstp;
4664 }
4665 return stp;
4666 }
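/*
 * Added commentary: the retry loop above resolves two races. If
 * another nfsd thread hashed an open stateid for this (openowner,
 * file) pair first, we back off and reuse theirs (our preallocated
 * stateid stays in open->op_stp for later cleanup); if a concurrent
 * CLOSE then kills that stateid before we can take st_mutex,
 * nfsd4_lock_ol_stateid() fails and we loop to either find a newer
 * stateid or finally insert our own.
 */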
4667
4668 /*
4669 * In the 4.0 case we need to keep the owners around a little while to handle
4670 * CLOSE replay. We still need to release any file access held by them
4671 * before returning, however.
4672 */
4673 static void
4674 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4675 {
4676 struct nfs4_ol_stateid *last;
4677 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4678 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4679 nfsd_net_id);
4680
4681 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4682
4683 /*
4684 * We know that we hold one reference via nfsd4_close, and another
4685 * "persistent" reference for the client. If the refcount is higher
4686 * than 2, then there are still calls in progress that are using this
4687 * stateid. We can't put the sc_file reference until they are finished.
4688 * Wait for the refcount to drop to 2. Since it has been unhashed,
4689 * there should be no danger of the refcount going back up again at
4690 * this point.
4691 */
4692 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4693
4694 release_all_access(s);
4695 if (s->st_stid.sc_file) {
4696 put_nfs4_file(s->st_stid.sc_file);
4697 s->st_stid.sc_file = NULL;
4698 }
4699
4700 spin_lock(&nn->client_lock);
4701 last = oo->oo_last_closed_stid;
4702 oo->oo_last_closed_stid = s;
4703 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4704 oo->oo_time = ktime_get_boottime_seconds();
4705 spin_unlock(&nn->client_lock);
4706 if (last)
4707 nfs4_put_stid(&last->st_stid);
4708 }
4709
4710 static noinline_for_stack struct nfs4_file *
4711 nfsd4_file_hash_lookup(const struct svc_fh *fhp)
4712 {
4713 struct inode *inode = d_inode(fhp->fh_dentry);
4714 struct rhlist_head *tmp, *list;
4715 struct nfs4_file *fi;
4716
4717 rcu_read_lock();
4718 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
4719 nfs4_file_rhash_params);
4720 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
4721 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
4722 if (refcount_inc_not_zero(&fi->fi_ref)) {
4723 rcu_read_unlock();
4724 return fi;
4725 }
4726 }
4727 }
4728 rcu_read_unlock();
4729 return NULL;
4730 }
4731
4732 /*
4733 * On hash insertion, identify entries with the same inode but
4734 * distinct filehandles. They will all be on the list returned
4735 * by rhltable_lookup().
4736 *
4737 * inode->i_lock prevents racing insertions from adding an entry
4738 * for the same inode/fhp pair twice.
4739 */
4740 static noinline_for_stack struct nfs4_file *
4741 nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
4742 {
4743 struct inode *inode = d_inode(fhp->fh_dentry);
4744 struct rhlist_head *tmp, *list;
4745 struct nfs4_file *ret = NULL;
4746 bool alias_found = false;
4747 struct nfs4_file *fi;
4748 int err;
4749
4750 rcu_read_lock();
4751 spin_lock(&inode->i_lock);
4752
4753 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
4754 nfs4_file_rhash_params);
4755 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
4756 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
4757 if (refcount_inc_not_zero(&fi->fi_ref))
4758 ret = fi;
4759 } else
4760 fi->fi_aliased = alias_found = true;
4761 }
4762 if (ret)
4763 goto out_unlock;
4764
4765 nfsd4_file_init(fhp, new);
4766 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
4767 nfs4_file_rhash_params);
4768 if (err)
4769 goto out_unlock;
4770
4771 new->fi_aliased = alias_found;
4772 ret = new;
4773
4774 out_unlock:
4775 spin_unlock(&inode->i_lock);
4776 rcu_read_unlock();
4777 return ret;
4778 }
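/*
 * Sketch of the aliasing handling above (added commentary): two
 * exports can hand out distinct filehandles for the same inode, for
 * example when exported with different fsid= options. Both nfs4_file
 * entries then live on the same rhltable bucket keyed by the inode
 * pointer, and each entry involved is marked fi_aliased so later
 * conflict checks know the inode is reachable through more than one
 * filehandle and must scan the whole bucket rather than trusting a
 * single fh_match().
 */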
4779
4780 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
4781 {
4782 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
4783 nfs4_file_rhash_params);
4784 }
4785
4786 /*
4787 * Called to check for deny conflicts on a READ using the all-zero
4788 * stateid, or a WRITE using the all-zero or all-ones stateid.
4789 */
4790 static __be32
4791 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4792 {
4793 struct nfs4_file *fp;
4794 __be32 ret = nfs_ok;
4795
4796 fp = nfsd4_file_hash_lookup(current_fh);
4797 if (!fp)
4798 return ret;
4799
4800 /* Check for conflicting share reservations */
4801 spin_lock(&fp->fi_lock);
4802 if (fp->fi_share_deny & deny_type)
4803 ret = nfserr_locked;
4804 spin_unlock(&fp->fi_lock);
4805 put_nfs4_file(fp);
4806 return ret;
4807 }
4808
4809 static bool nfsd4_deleg_present(const struct inode *inode)
4810 {
4811 struct file_lock_context *ctx = locks_inode_context(inode);
4812
4813 return ctx && !list_empty_careful(&ctx->flc_lease);
4814 }
4815
4816 /**
4817 * nfsd_wait_for_delegreturn - wait for delegations to be returned
4818 * @rqstp: the RPC transaction being executed
4819 * @inode: in-core inode of the file being waited for
4820 *
4821 * The timeout prevents deadlock if all nfsd threads happen to be
4822 * tied up waiting for returning delegations.
4823 *
4824 * Return values:
4825 * %true: delegation was returned
4826 * %false: timed out waiting for delegreturn
4827 */
4828 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
4829 {
4830 long __maybe_unused timeo;
4831
4832 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
4833 NFSD_DELEGRETURN_TIMEOUT);
4834 trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
4835 return timeo > 0;
4836 }
4837
4838 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4839 {
4840 struct nfs4_delegation *dp = cb_to_delegation(cb);
4841 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4842 nfsd_net_id);
4843
4844 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4845
4846 /*
4847 * We can't do this in nfsd_break_deleg_cb because it is
4848 * already holding inode->i_lock.
4849 *
4850 * If the dl_time != 0, then we know that it has already been
4851 * queued for a lease break. Don't queue it again.
4852 */
4853 spin_lock(&state_lock);
4854 if (delegation_hashed(dp) && dp->dl_time == 0) {
4855 dp->dl_time = ktime_get_boottime_seconds();
4856 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4857 }
4858 spin_unlock(&state_lock);
4859 }
4860
4861 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4862 struct rpc_task *task)
4863 {
4864 struct nfs4_delegation *dp = cb_to_delegation(cb);
4865
4866 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
4867
4868 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4869 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4870 return 1;
4871
4872 switch (task->tk_status) {
4873 case 0:
4874 return 1;
4875 case -NFS4ERR_DELAY:
4876 rpc_delay(task, 2 * HZ);
4877 return 0;
4878 case -EBADHANDLE:
4879 case -NFS4ERR_BAD_STATEID:
4880 /*
4881 * Race: client probably got cb_recall before open reply
4882 * granting delegation.
4883 */
4884 if (dp->dl_retries--) {
4885 rpc_delay(task, 2 * HZ);
4886 return 0;
4887 }
4888 fallthrough;
4889 default:
4890 return 1;
4891 }
4892 }
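/*
 * Added commentary on the retry policy above: NFS4ERR_DELAY from the
 * client delays the recall RPC by two seconds and retries without
 * limit; EBADHANDLE and NFS4ERR_BAD_STATEID, which can occur when the
 * recall races ahead of the OPEN reply that granted the delegation,
 * are retried only dl_retries times. Returning 1 tells the callback
 * machinery the RPC is finished; returning 0 requests a resend.
 */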
4893
4894 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4895 {
4896 struct nfs4_delegation *dp = cb_to_delegation(cb);
4897
4898 nfs4_put_stid(&dp->dl_stid);
4899 }
4900
4901 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4902 .prepare = nfsd4_cb_recall_prepare,
4903 .done = nfsd4_cb_recall_done,
4904 .release = nfsd4_cb_recall_release,
4905 };
4906
4907 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4908 {
4909 /*
4910 * We're assuming the state code never drops its reference
4911 * without first removing the lease. Since we're in this lease
4912 * callback (and since the lease code is serialized by the
4913 * flc_lock) we know the server hasn't removed the lease yet, and
4914 * we know it's safe to take a reference.
4915 */
4916 refcount_inc(&dp->dl_stid.sc_count);
4917 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
4918 }
4919
4920 /* Called from break_lease() with flc_lock held. */
4921 static bool
4922 nfsd_break_deleg_cb(struct file_lock *fl)
4923 {
4924 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4925 struct nfs4_file *fp = dp->dl_stid.sc_file;
4926 struct nfs4_client *clp = dp->dl_stid.sc_client;
4927 struct nfsd_net *nn;
4928
4929 trace_nfsd_cb_recall(&dp->dl_stid);
4930
4931 dp->dl_recalled = true;
4932 atomic_inc(&clp->cl_delegs_in_recall);
4933 if (try_to_expire_client(clp)) {
4934 nn = net_generic(clp->net, nfsd_net_id);
4935 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
4936 }
4937
4938 /*
4939 * We don't want the locks code to time out the lease for us;
4940 * we'll remove it ourselves if a delegation isn't returned
4941 * in time.
4942 */
4943 fl->fl_break_time = 0;
4944
4945 fp->fi_had_conflict = true;
4946 nfsd_break_one_deleg(dp);
4947 return false;
4948 }
4949
4950 /**
4951 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
4952 * @fl: Lock state to check
4953 *
4954 * Return values:
4955 * %true: Lease conflict was resolved
4956 * %false: Lease conflict was not resolved.
4957 */
4958 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4959 {
4960 struct nfs4_delegation *dl = fl->fl_owner;
4961 struct svc_rqst *rqst;
4962 struct nfs4_client *clp;
4963
4964 if (!i_am_nfsd())
4965 return false;
4966 rqst = kthread_data(current);
4967 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4968 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4969 return false;
4970 clp = *(rqst->rq_lease_breaker);
4971 return dl->dl_stid.sc_client == clp;
4972 }
4973
4974 static int
4975 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4976 struct list_head *dispose)
4977 {
4978 struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
4979 struct nfs4_client *clp = dp->dl_stid.sc_client;
4980
4981 if (arg & F_UNLCK) {
4982 if (dp->dl_recalled)
4983 atomic_dec(&clp->cl_delegs_in_recall);
4984 return lease_modify(onlist, arg, dispose);
4985 } else
4986 return -EAGAIN;
4987 }
4988
4989 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4990 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4991 .lm_break = nfsd_break_deleg_cb,
4992 .lm_change = nfsd_change_deleg_cb,
4993 };
4994
4995 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4996 {
4997 if (nfsd4_has_session(cstate))
4998 return nfs_ok;
4999 if (seqid == so->so_seqid - 1)
5000 return nfserr_replay_me;
5001 if (seqid == so->so_seqid)
5002 return nfs_ok;
5003 return nfserr_bad_seqid;
5004 }
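/*
 * Example (added commentary): for an NFSv4.0 owner whose so_seqid is
 * currently 8, an incoming seqid of 8 is the expected next request
 * (nfs_ok), 7 is a retransmission answered from the owner's replay
 * cache (nfserr_replay_me), and anything else is nfserr_bad_seqid.
 * NFSv4.1+ sessions do replay detection per slot instead, so the
 * check is skipped entirely when a session is present.
 */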
5005
5006 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
5007 struct nfsd_net *nn)
5008 {
5009 struct nfs4_client *found;
5010
5011 spin_lock(&nn->client_lock);
5012 found = find_confirmed_client(clid, sessions, nn);
5013 if (found)
5014 atomic_inc(&found->cl_rpc_users);
5015 spin_unlock(&nn->client_lock);
5016 return found;
5017 }
5018
5019 static __be32 set_client(clientid_t *clid,
5020 struct nfsd4_compound_state *cstate,
5021 struct nfsd_net *nn)
5022 {
5023 if (cstate->clp) {
5024 if (!same_clid(&cstate->clp->cl_clientid, clid))
5025 return nfserr_stale_clientid;
5026 return nfs_ok;
5027 }
5028 if (STALE_CLIENTID(clid, nn))
5029 return nfserr_stale_clientid;
5030 /*
5031 * We're in the 4.0 case (otherwise the SEQUENCE op would have
5032 * set cstate->clp), so pass sessions = false:
5033 */
5034 cstate->clp = lookup_clientid(clid, false, nn);
5035 if (!cstate->clp)
5036 return nfserr_expired;
5037 return nfs_ok;
5038 }
5039
5040 __be32
5041 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5042 struct nfsd4_open *open, struct nfsd_net *nn)
5043 {
5044 clientid_t *clientid = &open->op_clientid;
5045 struct nfs4_client *clp = NULL;
5046 unsigned int strhashval;
5047 struct nfs4_openowner *oo = NULL;
5048 __be32 status;
5049
5050 /*
5051 * In case we need it later, after we've already created the
5052 * file and don't want to risk a further failure:
5053 */
5054 open->op_file = nfsd4_alloc_file();
5055 if (open->op_file == NULL)
5056 return nfserr_jukebox;
5057
5058 status = set_client(clientid, cstate, nn);
5059 if (status)
5060 return status;
5061 clp = cstate->clp;
5062
5063 strhashval = ownerstr_hashval(&open->op_owner);
5064 oo = find_openstateowner_str(strhashval, open, clp);
5065 open->op_openowner = oo;
5066 if (!oo) {
5067 goto new_owner;
5068 }
5069 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5070 /* Replace unconfirmed owners without checking for replay. */
5071 release_openowner(oo);
5072 open->op_openowner = NULL;
5073 goto new_owner;
5074 }
5075 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5076 if (status)
5077 return status;
5078 goto alloc_stateid;
5079 new_owner:
5080 oo = alloc_init_open_stateowner(strhashval, open, cstate);
5081 if (oo == NULL)
5082 return nfserr_jukebox;
5083 open->op_openowner = oo;
5084 alloc_stateid:
5085 open->op_stp = nfs4_alloc_open_stateid(clp);
5086 if (!open->op_stp)
5087 return nfserr_jukebox;
5088
5089 if (nfsd4_has_session(cstate) &&
5090 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5091 open->op_odstate = alloc_clnt_odstate(clp);
5092 if (!open->op_odstate)
5093 return nfserr_jukebox;
5094 }
5095
5096 return nfs_ok;
5097 }
5098
5099 static inline __be32
5100 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5101 {
5102 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5103 return nfserr_openmode;
5104 else
5105 return nfs_ok;
5106 }
5107
5108 static int share_access_to_flags(u32 share_access)
5109 {
5110 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5111 }
5112
5113 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
5114 {
5115 struct nfs4_stid *ret;
5116
5117 ret = find_stateid_by_type(cl, s,
5118 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
5119 if (!ret)
5120 return NULL;
5121 return delegstateid(ret);
5122 }
5123
5124 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5125 {
5126 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5127 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5128 }
5129
5130 static __be32
5131 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5132 struct nfs4_delegation **dp)
5133 {
5134 int flags;
5135 __be32 status = nfserr_bad_stateid;
5136 struct nfs4_delegation *deleg;
5137
5138 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5139 if (deleg == NULL)
5140 goto out;
5141 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
5142 nfs4_put_stid(&deleg->dl_stid);
5143 if (cl->cl_minorversion)
5144 status = nfserr_deleg_revoked;
5145 goto out;
5146 }
5147 flags = share_access_to_flags(open->op_share_access);
5148 status = nfs4_check_delegmode(deleg, flags);
5149 if (status) {
5150 nfs4_put_stid(&deleg->dl_stid);
5151 goto out;
5152 }
5153 *dp = deleg;
5154 out:
5155 if (!nfsd4_is_deleg_cur(open))
5156 return nfs_ok;
5157 if (status)
5158 return status;
5159 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5160 return nfs_ok;
5161 }
5162
5163 static inline int nfs4_access_to_access(u32 nfs4_access)
5164 {
5165 int flags = 0;
5166
5167 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5168 flags |= NFSD_MAY_READ;
5169 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5170 flags |= NFSD_MAY_WRITE;
5171 return flags;
5172 }
5173
5174 static inline __be32
5175 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5176 struct nfsd4_open *open)
5177 {
5178 struct iattr iattr = {
5179 .ia_valid = ATTR_SIZE,
5180 .ia_size = 0,
5181 };
5182 struct nfsd_attrs attrs = {
5183 .na_iattr = &iattr,
5184 };
5185 if (!open->op_truncate)
5186 return 0;
5187 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5188 return nfserr_inval;
5189 return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
5190 }
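/*
 * Note that an OPEN-time truncate is implemented as an explicit
 * ATTR_SIZE setattr to zero, and is rejected with nfserr_inval
 * unless the open requests write share access.
 */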
5191
5192 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5193 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5194 struct nfsd4_open *open, bool new_stp)
5195 {
5196 struct nfsd_file *nf = NULL;
5197 __be32 status;
5198 int oflag = nfs4_access_to_omode(open->op_share_access);
5199 int access = nfs4_access_to_access(open->op_share_access);
5200 unsigned char old_access_bmap, old_deny_bmap;
5201
5202 spin_lock(&fp->fi_lock);
5203
5204 /*
5205 * Are we trying to set a deny mode that would conflict with
5206 * current access?
5207 */
5208 status = nfs4_file_check_deny(fp, open->op_share_deny);
5209 if (status != nfs_ok) {
5210 if (status != nfserr_share_denied) {
5211 spin_unlock(&fp->fi_lock);
5212 goto out;
5213 }
5214 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5215 stp, open->op_share_deny, false))
5216 status = nfserr_jukebox;
5217 spin_unlock(&fp->fi_lock);
5218 goto out;
5219 }
5220
5221 /* set access to the file */
5222 status = nfs4_file_get_access(fp, open->op_share_access);
5223 if (status != nfs_ok) {
5224 if (status != nfserr_share_denied) {
5225 spin_unlock(&fp->fi_lock);
5226 goto out;
5227 }
5228 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5229 stp, open->op_share_access, true))
5230 status = nfserr_jukebox;
5231 spin_unlock(&fp->fi_lock);
5232 goto out;
5233 }
5234
5235 /* Set access bits in stateid */
5236 old_access_bmap = stp->st_access_bmap;
5237 set_access(open->op_share_access, stp);
5238
5239 /* Set new deny mask */
5240 old_deny_bmap = stp->st_deny_bmap;
5241 set_deny(open->op_share_deny, stp);
5242 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5243
5244 if (!fp->fi_fds[oflag]) {
5245 spin_unlock(&fp->fi_lock);
5246
5247 status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5248 open->op_filp, &nf);
5249 if (status != nfs_ok)
5250 goto out_put_access;
5251
5252 spin_lock(&fp->fi_lock);
5253 if (!fp->fi_fds[oflag]) {
5254 fp->fi_fds[oflag] = nf;
5255 nf = NULL;
5256 }
5257 }
5258 spin_unlock(&fp->fi_lock);
5259 if (nf)
5260 nfsd_file_put(nf);
5261
5262 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5263 access));
5264 if (status)
5265 goto out_put_access;
5266
5267 status = nfsd4_truncate(rqstp, cur_fh, open);
5268 if (status)
5269 goto out_put_access;
5270 out:
5271 return status;
5272 out_put_access:
5273 stp->st_access_bmap = old_access_bmap;
5274 nfs4_file_put_access(fp, open->op_share_access);
5275 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5276 goto out;
5277 }
5278
5279 static __be32
5280 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5281 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5282 struct nfsd4_open *open)
5283 {
5284 __be32 status;
5285 unsigned char old_deny_bmap = stp->st_deny_bmap;
5286
5287 if (!test_access(open->op_share_access, stp))
5288 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5289
5290 /* test and set deny mode */
5291 spin_lock(&fp->fi_lock);
5292 status = nfs4_file_check_deny(fp, open->op_share_deny);
5293 switch (status) {
5294 case nfs_ok:
5295 set_deny(open->op_share_deny, stp);
5296 fp->fi_share_deny |=
5297 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5298 break;
5299 case nfserr_share_denied:
5300 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5301 stp, open->op_share_deny, false))
5302 status = nfserr_jukebox;
5303 break;
5304 }
5305 spin_unlock(&fp->fi_lock);
5306
5307 if (status != nfs_ok)
5308 return status;
5309
5310 status = nfsd4_truncate(rqstp, cur_fh, open);
5311 if (status != nfs_ok)
5312 reset_union_bmap_deny(old_deny_bmap, stp);
5313 return status;
5314 }
5315
5316 /* Should we give out recallable state?: */
5317 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5318 {
5319 if (clp->cl_cb_state == NFSD4_CB_UP)
5320 return true;
5321 /*
5322 * In the sessions case, since we don't have to establish a
5323 * separate connection for callbacks, we assume it's OK
5324 * until we hear otherwise:
5325 */
5326 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5327 }
5328
5329 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5330 int flag)
5331 {
5332 struct file_lock *fl;
5333
5334 fl = locks_alloc_lock();
5335 if (!fl)
5336 return NULL;
5337 fl->fl_lmops = &nfsd_lease_mng_ops;
5338 fl->fl_flags = FL_DELEG;
5339 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5340 fl->fl_end = OFFSET_MAX;
5341 fl->fl_owner = (fl_owner_t)dp;
5342 fl->fl_pid = current->tgid;
5343 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5344 return fl;
5345 }
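/*
 * The returned file_lock is handed to vfs_setlease() by the caller;
 * on success the VFS may consume the lock (setting the caller's
 * pointer to NULL), which is why nfs4_set_delegation() below frees
 * the lock only if it is still non-NULL afterwards.
 */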
5346
5347 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5348 struct nfs4_file *fp)
5349 {
5350 struct nfs4_ol_stateid *st;
5351 struct file *f = fp->fi_deleg_file->nf_file;
5352 struct inode *ino = file_inode(f);
5353 int writes;
5354
5355 writes = atomic_read(&ino->i_writecount);
5356 if (!writes)
5357 return 0;
5358 /*
5359 * There could be multiple filehandles (hence multiple
5360 * nfs4_files) referencing this file, but that's not too
5361 * common; let's just give up in that case rather than
5362 * trying to go look up all the clients using that other
5363 * nfs4_file as well:
5364 */
5365 if (fp->fi_aliased)
5366 return -EAGAIN;
5367 /*
5368 * If there's a close in progress, make sure that we see it
5369 * clear any fi_fds[] entries before we see it decrement
5370 * i_writecount:
5371 */
5372 smp_mb__after_atomic();
5373
5374 if (fp->fi_fds[O_WRONLY])
5375 writes--;
5376 if (fp->fi_fds[O_RDWR])
5377 writes--;
5378 if (writes > 0)
5379 return -EAGAIN; /* There may be non-NFSv4 writers */
5380 /*
5381 * It's possible there are non-NFSv4 write opens in progress,
5382 * but if they haven't incremented i_writecount yet then they
5383 * also haven't called break lease yet; so, they'll break this
5384 * lease soon enough. So, all that's left to check for is NFSv4
5385 * opens:
5386 */
5387 spin_lock(&fp->fi_lock);
5388 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5389 if (st->st_openstp == NULL /* it's an open */ &&
5390 access_permit_write(st) &&
5391 st->st_stid.sc_client != clp) {
5392 spin_unlock(&fp->fi_lock);
5393 return -EAGAIN;
5394 }
5395 }
5396 spin_unlock(&fp->fi_lock);
5397 /*
5398 * There's a small chance that we could be racing with another
5399 * NFSv4 open. However, any open that hasn't added itself to
5400 * the fi_stateids list also hasn't called break_lease yet; so,
5401 * they'll break this lease soon enough.
5402 */
5403 return 0;
5404 }
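/*
 * Worked example: if i_writecount is 2 and both fi_fds[O_WRONLY] and
 * fi_fds[O_RDWR] are populated, the residual count is zero, so the
 * only possible write conflicts are other NFSv4 opens, which the
 * fi_stateids walk above has already ruled out.
 */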
5405
5406 /*
5407 * It's possible that between opening the dentry and setting the delegation,
5408 * that it has been renamed or unlinked. Redo the lookup to verify that this
5409 * hasn't happened.
5410 */
5411 static int
5412 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5413 struct svc_fh *parent)
5414 {
5415 struct svc_export *exp;
5416 struct dentry *child;
5417 __be32 err;
5418
5419 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5420 open->op_fname, open->op_fnamelen,
5421 &exp, &child);
5422
5423 if (err)
5424 return -EAGAIN;
5425
5426 exp_put(exp);
5427 dput(child);
5428 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5429 return -EAGAIN;
5430
5431 return 0;
5432 }
5433
5434 /*
5435 * We avoid breaking delegations held by a client due to its own activity, but
5436 * clearing setuid/setgid bits on a write is an implicit activity and the client
5437 * may not notice and continue using the old mode. Avoid giving out a delegation
5438 * on setuid/setgid files when the client is requesting an open for write.
5439 */
5440 static int
5441 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5442 {
5443 struct inode *inode = file_inode(nf->nf_file);
5444
5445 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5446 (inode->i_mode & (S_ISUID|S_ISGID)))
5447 return -EAGAIN;
5448 return 0;
5449 }
5450
5451 static struct nfs4_delegation *
5452 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5453 struct svc_fh *parent)
5454 {
5455 int status = 0;
5456 struct nfs4_client *clp = stp->st_stid.sc_client;
5457 struct nfs4_file *fp = stp->st_stid.sc_file;
5458 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5459 struct nfs4_delegation *dp;
5460 struct nfsd_file *nf = NULL;
5461 struct file_lock *fl;
5462 u32 dl_type;
5463
5464 /*
5465 * The fi_had_conflict and nfs_get_existing_delegation checks
5466 * here are just optimizations; we'll need to recheck them at
5467 * the end:
5468 */
5469 if (fp->fi_had_conflict)
5470 return ERR_PTR(-EAGAIN);
5471
5472 /*
5473 * Try for a write delegation first. RFC8881 section 10.4 says:
5474 *
5475 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
5476 * on its own, all opens."
5477 *
5478 * Furthermore the client can use a write delegation for most READ
5479 * operations as well, so we require an O_RDWR file here.
5480 *
5481 * Offer a write delegation in the case of a BOTH open, and ensure
5482 * we get the O_RDWR descriptor.
5483 */
5484 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
5485 nf = find_rw_file(fp);
5486 dl_type = NFS4_OPEN_DELEGATE_WRITE;
5487 }
5488
5489 /*
5490 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
5491 * file for some reason, then try for a read delegation instead.
5492 */
5493 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
5494 nf = find_readable_file(fp);
5495 dl_type = NFS4_OPEN_DELEGATE_READ;
5496 }
5497
5498 if (!nf)
5499 return ERR_PTR(-EAGAIN);
5500
5501 spin_lock(&state_lock);
5502 spin_lock(&fp->fi_lock);
5503 if (nfs4_delegation_exists(clp, fp))
5504 status = -EAGAIN;
5505 else if (nfsd4_verify_setuid_write(open, nf))
5506 status = -EAGAIN;
5507 else if (!fp->fi_deleg_file) {
5508 fp->fi_deleg_file = nf;
5509 /* increment early to prevent fi_deleg_file from being
5510 * cleared */
5511 fp->fi_delegees = 1;
5512 nf = NULL;
5513 } else
5514 fp->fi_delegees++;
5515 spin_unlock(&fp->fi_lock);
5516 spin_unlock(&state_lock);
5517 if (nf)
5518 nfsd_file_put(nf);
5519 if (status)
5520 return ERR_PTR(status);
5521
5522 status = -ENOMEM;
5523 dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5524 if (!dp)
5525 goto out_delegees;
5526
5527 fl = nfs4_alloc_init_lease(dp, dl_type);
5528 if (!fl)
5529 goto out_clnt_odstate;
5530
5531 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5532 if (fl)
5533 locks_free_lock(fl);
5534 if (status)
5535 goto out_clnt_odstate;
5536
5537 if (parent) {
5538 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5539 if (status)
5540 goto out_unlock;
5541 }
5542
5543 status = nfsd4_check_conflicting_opens(clp, fp);
5544 if (status)
5545 goto out_unlock;
5546
5547 /*
5548 * Now that the deleg is set, check again to ensure that nothing
5549 * raced in and changed the mode while we weren't looking.
5550 */
5551 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5552 if (status)
5553 goto out_unlock;
5554
5555 status = -EAGAIN;
5556 if (fp->fi_had_conflict)
5557 goto out_unlock;
5558
5559 spin_lock(&state_lock);
5560 spin_lock(&fp->fi_lock);
5561 status = hash_delegation_locked(dp, fp);
5562 spin_unlock(&fp->fi_lock);
5563 spin_unlock(&state_lock);
5564
5565 if (status)
5566 goto out_unlock;
5567
5568 return dp;
5569 out_unlock:
5570 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5571 out_clnt_odstate:
5572 put_clnt_odstate(dp->dl_clnt_odstate);
5573 nfs4_put_stid(&dp->dl_stid);
5574 out_delegees:
5575 put_deleg_file(fp);
5576 return ERR_PTR(status);
5577 }
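/*
 * Ordering note: the lease is installed first and the conflict checks
 * are then redone under its protection. Any open or mode change that
 * races in after vfs_setlease() succeeds will trigger a lease break,
 * so rechecking fi_had_conflict and the setuid bits above closes the
 * window left by the optimistic checks at the top of the function.
 */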
5578
5579 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5580 {
5581 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5582 if (status == -EAGAIN)
5583 open->op_why_no_deleg = WND4_CONTENTION;
5584 else {
5585 open->op_why_no_deleg = WND4_RESOURCE;
5586 switch (open->op_deleg_want) {
5587 case NFS4_SHARE_WANT_READ_DELEG:
5588 case NFS4_SHARE_WANT_WRITE_DELEG:
5589 case NFS4_SHARE_WANT_ANY_DELEG:
5590 break;
5591 case NFS4_SHARE_WANT_CANCEL:
5592 open->op_why_no_deleg = WND4_CANCELLED;
5593 break;
5594 case NFS4_SHARE_WANT_NO_DELEG:
5595 WARN_ON_ONCE(1);
5596 }
5597 }
5598 }
5599
5600 /*
5601 * The Linux NFS server does not offer write delegations to NFSv4.0
5602 * clients in order to avoid conflicts between write delegations and
5603 * GETATTRs requesting CHANGE or SIZE attributes.
5604 *
5605 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
5606 * begins each COMPOUND contains a client ID. Delegation recall can
5607 * be avoided when the server recognizes that the client sending a
5608 * GETATTR also holds the write delegation it conflicts with.
5609 *
5610 * However, the NFSv4.0 protocol does not enable a server to
5611 * determine that a GETATTR originated from the client holding the
5612 * conflicting delegation versus coming from some other client. Per
5613 * RFC 7530 Section 16.7.5, the server must recall or send a
5614 * CB_GETATTR even when the GETATTR originates from the client that
5615 * holds the conflicting delegation.
5616 *
5617 * An NFSv4.0 client can trigger a pathological situation if it
5618 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
5619 * the same COMPOUND. COMPOUND execution will always stop at the
5620 * GETATTR and the DELEGRETURN will never get executed. The server
5621 * eventually revokes the delegation, which can result in loss of
5622 * open or lock state.
5623 */
5624 static void
5625 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5626 struct svc_fh *currentfh)
5627 {
5628 struct nfs4_delegation *dp;
5629 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5630 struct nfs4_client *clp = stp->st_stid.sc_client;
5631 struct svc_fh *parent = NULL;
5632 int cb_up;
5633 int status = 0;
5634
5635 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5636 open->op_recall = 0;
5637 switch (open->op_claim_type) {
5638 case NFS4_OPEN_CLAIM_PREVIOUS:
5639 if (!cb_up)
5640 open->op_recall = 1;
5641 break;
5642 case NFS4_OPEN_CLAIM_NULL:
5643 parent = currentfh;
5644 fallthrough;
5645 case NFS4_OPEN_CLAIM_FH:
5646 /*
5647 * Let's not give out any delegations till everyone's
5648 * had the chance to reclaim theirs, *and* until
5649 * NLM locks have all been reclaimed:
5650 */
5651 if (locks_in_grace(clp->net))
5652 goto out_no_deleg;
5653 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5654 goto out_no_deleg;
5655 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
5656 !clp->cl_minorversion)
5657 goto out_no_deleg;
5658 break;
5659 default:
5660 goto out_no_deleg;
5661 }
5662 dp = nfs4_set_delegation(open, stp, parent);
5663 if (IS_ERR(dp))
5664 goto out_no_deleg;
5665
5666 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5667
5668 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
5669 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
5670 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
5671 } else {
5672 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5673 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5674 }
5675 nfs4_put_stid(&dp->dl_stid);
5676 return;
5677 out_no_deleg:
5678 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5679 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5680 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5681 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5682 open->op_recall = 1;
5683 }
5684
5685 /* 4.1 client asking for a delegation? */
5686 if (open->op_deleg_want)
5687 nfsd4_open_deleg_none_ext(open, status);
5688 return;
5689 }
5690
5691 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5692 struct nfs4_delegation *dp)
5693 {
5694 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5695 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5696 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5697 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5698 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5699 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5700 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5701 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5702 }
5703 /* Otherwise the client must be confused wanting a delegation
5704 * it already has, therefore we don't return
5705 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5706 */
5707 }
5708
5709 /**
5710 * nfsd4_process_open2 - finish open processing
5711 * @rqstp: the RPC transaction being executed
5712 * @current_fh: NFSv4 COMPOUND's current filehandle
5713 * @open: OPEN arguments
5714 *
5715 * If successful, (1) truncate the file if open->op_truncate was
5716 * set, (2) set open->op_stateid, (3) set open->op_delegation.
5717 *
5718 * Returns %nfs_ok on success; otherwise an nfs4stat value in
5719 * network byte order is returned.
5720 */
5721 __be32
5722 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5723 {
5724 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5725 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5726 struct nfs4_file *fp = NULL;
5727 struct nfs4_ol_stateid *stp = NULL;
5728 struct nfs4_delegation *dp = NULL;
5729 __be32 status;
5730 bool new_stp = false;
5731
5732 /*
5733 * Lookup file; if found, lookup stateid and check open request,
5734 * and check for delegations in the process of being recalled.
5735 * If not found, create the nfs4_file struct
5736 */
5737 fp = nfsd4_file_hash_insert(open->op_file, current_fh);
5738 if (unlikely(!fp))
5739 return nfserr_jukebox;
5740 if (fp != open->op_file) {
5741 status = nfs4_check_deleg(cl, open, &dp);
5742 if (status)
5743 goto out;
5744 stp = nfsd4_find_and_lock_existing_open(fp, open);
5745 } else {
5746 open->op_file = NULL;
5747 status = nfserr_bad_stateid;
5748 if (nfsd4_is_deleg_cur(open))
5749 goto out;
5750 }
5751
5752 if (!stp) {
5753 stp = init_open_stateid(fp, open);
5754 if (!open->op_stp)
5755 new_stp = true;
5756 }
5757
5758 /*
5759 * OPEN the file, or upgrade an existing OPEN.
5760 * If truncate fails, the OPEN fails.
5761 *
5762 * stp is already locked.
5763 */
5764 if (!new_stp) {
5765 /* Stateid was found, this is an OPEN upgrade */
5766 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5767 if (status) {
5768 mutex_unlock(&stp->st_mutex);
5769 goto out;
5770 }
5771 } else {
5772 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
5773 if (status) {
5774 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5775 release_open_stateid(stp);
5776 mutex_unlock(&stp->st_mutex);
5777 goto out;
5778 }
5779
5780 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5781 open->op_odstate);
5782 if (stp->st_clnt_odstate == open->op_odstate)
5783 open->op_odstate = NULL;
5784 }
5785
5786 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5787 mutex_unlock(&stp->st_mutex);
5788
5789 if (nfsd4_has_session(&resp->cstate)) {
5790 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5791 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5792 open->op_why_no_deleg = WND4_NOT_WANTED;
5793 goto nodeleg;
5794 }
5795 }
5796
5797 /*
5798 * Attempt to hand out a delegation. No error return, because the
5799 * OPEN succeeds even if we fail.
5800 */
5801 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
5802 nodeleg:
5803 status = nfs_ok;
5804 trace_nfsd_open(&stp->st_stid.sc_stateid);
5805 out:
5806 /* 4.1 client trying to upgrade/downgrade delegation? */
5807 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5808 open->op_deleg_want)
5809 nfsd4_deleg_xgrade_none_ext(open, dp);
5810
5811 if (fp)
5812 put_nfs4_file(fp);
5813 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5814 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5815 /*
5816 * To finish the open response, we just need to set the rflags.
5817 */
5818 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5819 if (nfsd4_has_session(&resp->cstate))
5820 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5821 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5822 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5823
5824 if (dp)
5825 nfs4_put_stid(&dp->dl_stid);
5826 if (stp)
5827 nfs4_put_stid(&stp->st_stid);
5828
5829 return status;
5830 }
5831
5832 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5833 struct nfsd4_open *open)
5834 {
5835 if (open->op_openowner) {
5836 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5837
5838 nfsd4_cstate_assign_replay(cstate, so);
5839 nfs4_put_stateowner(so);
5840 }
5841 if (open->op_file)
5842 kmem_cache_free(file_slab, open->op_file);
5843 if (open->op_stp)
5844 nfs4_put_stid(&open->op_stp->st_stid);
5845 if (open->op_odstate)
5846 kmem_cache_free(odstate_slab, open->op_odstate);
5847 }
5848
5849 __be32
5850 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5851 union nfsd4_op_u *u)
5852 {
5853 clientid_t *clid = &u->renew;
5854 struct nfs4_client *clp;
5855 __be32 status;
5856 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5857
5858 trace_nfsd_clid_renew(clid);
5859 status = set_client(clid, cstate, nn);
5860 if (status)
5861 return status;
5862 clp = cstate->clp;
5863 if (!list_empty(&clp->cl_delegations)
5864 && clp->cl_cb_state != NFSD4_CB_UP)
5865 return nfserr_cb_path_down;
5866 return nfs_ok;
5867 }
5868
5869 void
5870 nfsd4_end_grace(struct nfsd_net *nn)
5871 {
5872 /* do nothing if grace period already ended */
5873 if (nn->grace_ended)
5874 return;
5875
5876 trace_nfsd_grace_complete(nn);
5877 nn->grace_ended = true;
5878 /*
5879 * If the server goes down again right now, an NFSv4
5880 * client will still be allowed to reclaim after it comes back up,
5881 * even if it hasn't yet had a chance to reclaim state this time.
5882 */
5884 nfsd4_record_grace_done(nn);
5885 /*
5886 * At this point, NFSv4 clients can still reclaim. But if the
5887 * server crashes, any that have not yet reclaimed will be out
5888 * of luck on the next boot.
5889 *
5890 * (NFSv4.1+ clients are considered to have reclaimed once they
5891 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5892 * have reclaimed after their first OPEN.)
5893 */
5894 locks_end_grace(&nn->nfsd4_manager);
5895 /*
5896 * At this point, and once lockd and/or any other containers
5897 * exit their grace period, further reclaims will fail and
5898 * regular locking can resume.
5899 */
5900 }
5901
5902 /*
5903 * If we've waited a lease period but there are still clients trying to
5904 * reclaim, wait a little longer to give them a chance to finish.
5905 */
5906 static bool clients_still_reclaiming(struct nfsd_net *nn)
5907 {
5908 time64_t double_grace_period_end = nn->boot_time +
5909 2 * nn->nfsd4_lease;
5910
5911 if (nn->track_reclaim_completes &&
5912 atomic_read(&nn->nr_reclaim_complete) ==
5913 nn->reclaim_str_hashtbl_size)
5914 return false;
5915 if (!nn->somebody_reclaimed)
5916 return false;
5917 nn->somebody_reclaimed = false;
5918 /*
5919 * If we've given them *two* lease times to reclaim, and they're
5920 * still not done, give up:
5921 */
5922 if (ktime_get_boottime_seconds() > double_grace_period_end)
5923 return false;
5924 return true;
5925 }
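/*
 * Example: with the default 90s lease, reclaim stragglers are
 * tolerated until boot_time + 180s; beyond that the grace period ends
 * even if some clients never finished reclaiming.
 */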
5926
5927 struct laundry_time {
5928 time64_t cutoff;
5929 time64_t new_timeo;
5930 };
5931
5932 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5933 {
5934 time64_t time_remaining;
5935
5936 if (last_refresh < lt->cutoff)
5937 return true;
5938 time_remaining = last_refresh - lt->cutoff;
5939 lt->new_timeo = min(lt->new_timeo, time_remaining);
5940 return false;
5941 }
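/*
 * Example with hypothetical numbers: given a 90s lease, cutoff is
 * now - 90. A client that last renewed 30s ago has last_refresh =
 * now - 30, so it is not expired and time_remaining is 60s; the
 * laundromat will therefore rerun within 60s instead of waiting a
 * full lease period.
 */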
5942
5943 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5944 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5945 {
5946 spin_lock_init(&nn->nfsd_ssc_lock);
5947 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5948 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5949 }
5950 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5951
5952 /*
5953 * This is called when nfsd is being shut down, after all inter_ssc
5954 * cleanup is done, to destroy the ssc delayed unmount list.
5955 */
5956 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5957 {
5958 struct nfsd4_ssc_umount_item *ni = NULL;
5959 struct nfsd4_ssc_umount_item *tmp;
5960
5961 spin_lock(&nn->nfsd_ssc_lock);
5962 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5963 list_del(&ni->nsui_list);
5964 spin_unlock(&nn->nfsd_ssc_lock);
5965 mntput(ni->nsui_vfsmount);
5966 kfree(ni);
5967 spin_lock(&nn->nfsd_ssc_lock);
5968 }
5969 spin_unlock(&nn->nfsd_ssc_lock);
5970 }
5971
5972 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5973 {
5974 bool do_wakeup = false;
5975 struct nfsd4_ssc_umount_item *ni = NULL;
5976 struct nfsd4_ssc_umount_item *tmp;
5977
5978 spin_lock(&nn->nfsd_ssc_lock);
5979 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5980 if (time_after(jiffies, ni->nsui_expire)) {
5981 if (refcount_read(&ni->nsui_refcnt) > 1)
5982 continue;
5983
5984 /* mark as being unmounted */
5985 ni->nsui_busy = true;
5986 spin_unlock(&nn->nfsd_ssc_lock);
5987 mntput(ni->nsui_vfsmount);
5988 spin_lock(&nn->nfsd_ssc_lock);
5989
5990 /* waiters need to start from the beginning of the list */
5991 list_del(&ni->nsui_list);
5992 kfree(ni);
5993
5994 /* wakeup ssc_connect waiters */
5995 do_wakeup = true;
5996 continue;
5997 }
5998 break;
5999 }
6000 if (do_wakeup)
6001 wake_up_all(&nn->nfsd_ssc_waitq);
6002 spin_unlock(&nn->nfsd_ssc_lock);
6003 }
6004 #endif
6005
6006 /* Check if any lock belonging to this lockowner has any blockers */
6007 static bool
6008 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
6009 {
6010 struct file_lock_context *ctx;
6011 struct nfs4_ol_stateid *stp;
6012 struct nfs4_file *nf;
6013
6014 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
6015 nf = stp->st_stid.sc_file;
6016 ctx = locks_inode_context(nf->fi_inode);
6017 if (!ctx)
6018 continue;
6019 if (locks_owner_has_blockers(ctx, lo))
6020 return true;
6021 }
6022 return false;
6023 }
6024
6025 static bool
6026 nfs4_anylock_blockers(struct nfs4_client *clp)
6027 {
6028 int i;
6029 struct nfs4_stateowner *so;
6030 struct nfs4_lockowner *lo;
6031
6032 if (atomic_read(&clp->cl_delegs_in_recall))
6033 return true;
6034 spin_lock(&clp->cl_lock);
6035 for (i = 0; i < OWNER_HASH_SIZE; i++) {
6036 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6037 so_strhash) {
6038 if (so->so_is_open_owner)
6039 continue;
6040 lo = lockowner(so);
6041 if (nfs4_lockowner_has_blockers(lo)) {
6042 spin_unlock(&clp->cl_lock);
6043 return true;
6044 }
6045 }
6046 }
6047 spin_unlock(&clp->cl_lock);
6048 return false;
6049 }
6050
6051 static void
6052 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
6053 struct laundry_time *lt)
6054 {
6055 unsigned int maxreap, reapcnt = 0;
6056 struct list_head *pos, *next;
6057 struct nfs4_client *clp;
6058
6059 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6060 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
6061 INIT_LIST_HEAD(reaplist);
6062 spin_lock(&nn->client_lock);
6063 list_for_each_safe(pos, next, &nn->client_lru) {
6064 clp = list_entry(pos, struct nfs4_client, cl_lru);
6065 if (clp->cl_state == NFSD4_EXPIRABLE)
6066 goto exp_client;
6067 if (!state_expired(lt, clp->cl_time))
6068 break;
6069 if (!atomic_read(&clp->cl_rpc_users)) {
6070 if (clp->cl_state == NFSD4_ACTIVE)
6071 atomic_inc(&nn->nfsd_courtesy_clients);
6072 clp->cl_state = NFSD4_COURTESY;
6073 }
6074 if (!client_has_state(clp))
6075 goto exp_client;
6076 if (!nfs4_anylock_blockers(clp))
6077 if (reapcnt >= maxreap)
6078 continue;
6079 exp_client:
6080 if (!mark_client_expired_locked(clp)) {
6081 list_add(&clp->cl_lru, reaplist);
6082 reapcnt++;
6083 }
6084 }
6085 spin_unlock(&nn->client_lock);
6086 }
6087
6088 static void
6089 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6090 struct list_head *reaplist)
6091 {
6092 unsigned int maxreap = 0, reapcnt = 0;
6093 struct list_head *pos, *next;
6094 struct nfs4_client *clp;
6095
6096 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6097 INIT_LIST_HEAD(reaplist);
6098
6099 spin_lock(&nn->client_lock);
6100 list_for_each_safe(pos, next, &nn->client_lru) {
6101 clp = list_entry(pos, struct nfs4_client, cl_lru);
6102 if (clp->cl_state == NFSD4_ACTIVE)
6103 break;
6104 if (reapcnt >= maxreap)
6105 break;
6106 if (!mark_client_expired_locked(clp)) {
6107 list_add(&clp->cl_lru, reaplist);
6108 reapcnt++;
6109 }
6110 }
6111 spin_unlock(&nn->client_lock);
6112 }
6113
6114 static void
6115 nfs4_process_client_reaplist(struct list_head *reaplist)
6116 {
6117 struct list_head *pos, *next;
6118 struct nfs4_client *clp;
6119
6120 list_for_each_safe(pos, next, reaplist) {
6121 clp = list_entry(pos, struct nfs4_client, cl_lru);
6122 trace_nfsd_clid_purged(&clp->cl_clientid);
6123 list_del_init(&clp->cl_lru);
6124 expire_client(clp);
6125 }
6126 }
6127
6128 static time64_t
6129 nfs4_laundromat(struct nfsd_net *nn)
6130 {
6131 struct nfs4_openowner *oo;
6132 struct nfs4_delegation *dp;
6133 struct nfs4_ol_stateid *stp;
6134 struct nfsd4_blocked_lock *nbl;
6135 struct list_head *pos, *next, reaplist;
6136 struct laundry_time lt = {
6137 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6138 .new_timeo = nn->nfsd4_lease
6139 };
6140 struct nfs4_cpntf_state *cps;
6141 copy_stateid_t *cps_t;
6142 int i;
6143
6144 if (clients_still_reclaiming(nn)) {
6145 lt.new_timeo = 0;
6146 goto out;
6147 }
6148 nfsd4_end_grace(nn);
6149
6150 spin_lock(&nn->s2s_cp_lock);
6151 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6152 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6153 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6154 state_expired(<, cps->cpntf_time))
6155 _free_cpntf_state_locked(nn, cps);
6156 }
6157 spin_unlock(&nn->s2s_cp_lock);
6158 nfs4_get_client_reaplist(nn, &reaplist, <);
6159 nfs4_process_client_reaplist(&reaplist);
6160
6161 spin_lock(&state_lock);
6162 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6163 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6164 if (!state_expired(<, dp->dl_time))
6165 break;
6166 WARN_ON(!unhash_delegation_locked(dp));
6167 list_add(&dp->dl_recall_lru, &reaplist);
6168 }
6169 spin_unlock(&state_lock);
6170 while (!list_empty(&reaplist)) {
6171 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6172 dl_recall_lru);
6173 list_del_init(&dp->dl_recall_lru);
6174 revoke_delegation(dp);
6175 }
6176
6177 spin_lock(&nn->client_lock);
6178 while (!list_empty(&nn->close_lru)) {
6179 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6180 oo_close_lru);
6181 if (!state_expired(<, oo->oo_time))
6182 break;
6183 list_del_init(&oo->oo_close_lru);
6184 stp = oo->oo_last_closed_stid;
6185 oo->oo_last_closed_stid = NULL;
6186 spin_unlock(&nn->client_lock);
6187 nfs4_put_stid(&stp->st_stid);
6188 spin_lock(&nn->client_lock);
6189 }
6190 spin_unlock(&nn->client_lock);
6191
6192 /*
6193 * It's possible for a client to try and acquire an already held lock
6194 * that is being held for a long time, and then lose interest in it.
6195 * So, we clean out any un-revisited request after a lease period
6196 * under the assumption that the client is no longer interested.
6197 *
6198 * RFC5661, sec. 9.6 states that the client must not rely on getting
6199 * notifications and must continue to poll for locks, even when the
6200 * server supports them. Thus this shouldn't lead to clients blocking
6201 * indefinitely once the lock does become free.
6202 */
6203 BUG_ON(!list_empty(&reaplist));
6204 spin_lock(&nn->blocked_locks_lock);
6205 while (!list_empty(&nn->blocked_locks_lru)) {
6206 nbl = list_first_entry(&nn->blocked_locks_lru,
6207 struct nfsd4_blocked_lock, nbl_lru);
6208 if (!state_expired(<, nbl->nbl_time))
6209 break;
6210 list_move(&nbl->nbl_lru, &reaplist);
6211 list_del_init(&nbl->nbl_list);
6212 }
6213 spin_unlock(&nn->blocked_locks_lock);
6214
6215 while (!list_empty(&reaplist)) {
6216 nbl = list_first_entry(&reaplist,
6217 struct nfsd4_blocked_lock, nbl_lru);
6218 list_del_init(&nbl->nbl_lru);
6219 free_blocked_lock(nbl);
6220 }
6221 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
6222 /* service the server-to-server copy delayed unmount list */
6223 nfsd4_ssc_expire_umount(nn);
6224 #endif
6225 out:
6226 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6227 }
6228
6229 static void laundromat_main(struct work_struct *);
6230
6231 static void
6232 laundromat_main(struct work_struct *laundry)
6233 {
6234 time64_t t;
6235 struct delayed_work *dwork = to_delayed_work(laundry);
6236 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6237 laundromat_work);
6238
6239 t = nfs4_laundromat(nn);
6240 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6241 }
6242
6243 static void
6244 courtesy_client_reaper(struct nfsd_net *nn)
6245 {
6246 struct list_head reaplist;
6247
6248 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6249 nfs4_process_client_reaplist(&reaplist);
6250 }
6251
6252 static void
6253 deleg_reaper(struct nfsd_net *nn)
6254 {
6255 struct list_head *pos, *next;
6256 struct nfs4_client *clp;
6257 struct list_head cblist;
6258
6259 INIT_LIST_HEAD(&cblist);
6260 spin_lock(&nn->client_lock);
6261 list_for_each_safe(pos, next, &nn->client_lru) {
6262 clp = list_entry(pos, struct nfs4_client, cl_lru);
6263 if (clp->cl_state != NFSD4_ACTIVE ||
6264 list_empty(&clp->cl_delegations) ||
6265 atomic_read(&clp->cl_delegs_in_recall) ||
6266 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6267 (ktime_get_boottime_seconds() -
6268 clp->cl_ra_time < 5)) {
6269 continue;
6270 }
6271 list_add(&clp->cl_ra_cblist, &cblist);
6272
6273 /* release in nfsd4_cb_recall_any_release */
6274 kref_get(&clp->cl_nfsdfs.cl_ref);
6275 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6276 clp->cl_ra_time = ktime_get_boottime_seconds();
6277 }
6278 spin_unlock(&nn->client_lock);
6279
6280 while (!list_empty(&cblist)) {
6281 clp = list_first_entry(&cblist, struct nfs4_client,
6282 cl_ra_cblist);
6283 list_del_init(&clp->cl_ra_cblist);
6284 clp->cl_ra->ra_keep = 0;
6285 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
6286 trace_nfsd_cb_recall_any(clp->cl_ra);
6287 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6288 }
6289 }
6290
6291 static void
6292 nfsd4_state_shrinker_worker(struct work_struct *work)
6293 {
6294 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6295 nfsd_shrinker_work);
6296
6297 courtesy_client_reaper(nn);
6298 deleg_reaper(nn);
6299 }
6300
6301 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6302 {
6303 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6304 return nfserr_bad_stateid;
6305 return nfs_ok;
6306 }
6307
6308 static
6309 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6310 {
6311 __be32 status = nfserr_openmode;
6312
6313 /* For lock stateids, we test the parent open, not the lock: */
6314 if (stp->st_openstp)
6315 stp = stp->st_openstp;
6316 if ((flags & WR_STATE) && !access_permit_write(stp))
6317 goto out;
6318 if ((flags & RD_STATE) && !access_permit_read(stp))
6319 goto out;
6320 status = nfs_ok;
6321 out:
6322 return status;
6323 }
6324
6325 static inline __be32
6326 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6327 {
6328 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6329 return nfs_ok;
6330 else if (opens_in_grace(net)) {
6331 /* Answer in remaining cases depends on existence of
6332 * conflicting state; so we must wait out the grace period. */
6333 return nfserr_grace;
6334 } else if (flags & WR_STATE)
6335 return nfs4_share_conflict(current_fh,
6336 NFS4_SHARE_DENY_WRITE);
6337 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6338 return nfs4_share_conflict(current_fh,
6339 NFS4_SHARE_DENY_READ);
6340 }
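/*
 * Recap of the special stateids: the all-ones stateid bypasses share
 * checks for READ outright, while the all-zeros ("anonymous") stateid
 * is honoured only when no conflicting share-deny state exists, which
 * cannot be determined until the grace period has ended.
 */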
6341
6342 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6343 {
6344 /*
6345 * When sessions are used the stateid generation number is ignored
6346 * when it is zero.
6347 */
6348 if (has_session && in->si_generation == 0)
6349 return nfs_ok;
6350
6351 if (in->si_generation == ref->si_generation)
6352 return nfs_ok;
6353
6354 /* If the client sends us a stateid from the future, it's buggy: */
6355 if (nfsd4_stateid_generation_after(in, ref))
6356 return nfserr_bad_stateid;
6357 /*
6358 * However, we could see a stateid from the past, even from a
6359 * non-buggy client. For example, if the client sends a lock
6360 * while some IO is outstanding, the lock may bump si_generation
6361 * while the IO is still in flight. The client could avoid that
6362 * situation by waiting for responses on all the IO requests,
6363 * but better performance may result in retrying IO that
6364 * receives an old_stateid error if requests are rarely
6365 * reordered in flight:
6366 */
6367 return nfserr_old_stateid;
6368 }
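/*
 * Example: a client holding generation 3 of a stateid sends a WRITE,
 * then a LOCK that bumps the server's generation to 4. If the WRITE
 * (still carrying generation 3) is processed last, it draws
 * nfserr_old_stateid, a retryable outcome rather than a client bug.
 */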
6369
6370 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6371 {
6372 __be32 ret;
6373
6374 spin_lock(&s->sc_lock);
6375 ret = nfsd4_verify_open_stid(s);
6376 if (ret == nfs_ok)
6377 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6378 spin_unlock(&s->sc_lock);
6379 return ret;
6380 }
6381
6382 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6383 {
6384 if (ols->st_stateowner->so_is_open_owner &&
6385 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6386 return nfserr_bad_stateid;
6387 return nfs_ok;
6388 }
6389
6390 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6391 {
6392 struct nfs4_stid *s;
6393 __be32 status = nfserr_bad_stateid;
6394
6395 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6396 CLOSE_STATEID(stateid))
6397 return status;
6398 spin_lock(&cl->cl_lock);
6399 s = find_stateid_locked(cl, stateid);
6400 if (!s)
6401 goto out_unlock;
6402 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6403 if (status)
6404 goto out_unlock;
6405 switch (s->sc_type) {
6406 case NFS4_DELEG_STID:
6407 status = nfs_ok;
6408 break;
6409 case NFS4_REVOKED_DELEG_STID:
6410 status = nfserr_deleg_revoked;
6411 break;
6412 case NFS4_OPEN_STID:
6413 case NFS4_LOCK_STID:
6414 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6415 break;
6416 default:
6417 printk("unknown stateid type %x\n", s->sc_type);
6418 fallthrough;
6419 case NFS4_CLOSED_STID:
6420 case NFS4_CLOSED_DELEG_STID:
6421 status = nfserr_bad_stateid;
6422 }
6423 out_unlock:
6424 spin_unlock(&cl->cl_lock);
6425 return status;
6426 }
6427
6428 __be32
6429 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6430 stateid_t *stateid, unsigned char typemask,
6431 struct nfs4_stid **s, struct nfsd_net *nn)
6432 {
6433 __be32 status;
6434 struct nfs4_stid *stid;
6435 bool return_revoked = false;
6436
6437 /*
6438 * Only return revoked delegations if explicitly asked for;
6439 * otherwise we report revoked or bad_stateid status.
6440 */
6441 if (typemask & NFS4_REVOKED_DELEG_STID)
6442 return_revoked = true;
6443 else if (typemask & NFS4_DELEG_STID)
6444 typemask |= NFS4_REVOKED_DELEG_STID;
6445
6446 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6447 CLOSE_STATEID(stateid))
6448 return nfserr_bad_stateid;
6449 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6450 if (status == nfserr_stale_clientid) {
6451 if (cstate->session)
6452 return nfserr_bad_stateid;
6453 return nfserr_stale_stateid;
6454 }
6455 if (status)
6456 return status;
6457 stid = find_stateid_by_type(cstate->clp, stateid, typemask);
6458 if (!stid)
6459 return nfserr_bad_stateid;
6460 if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6461 nfs4_put_stid(stid);
6462 if (cstate->minorversion)
6463 return nfserr_deleg_revoked;
6464 return nfserr_bad_stateid;
6465 }
6466 *s = stid;
6467 return nfs_ok;
6468 }
6469
6470 static struct nfsd_file *
6471 nfs4_find_file(struct nfs4_stid *s, int flags)
6472 {
6473 struct nfsd_file *ret = NULL;
6474
6475 if (!s)
6476 return NULL;
6477
6478 switch (s->sc_type) {
6479 case NFS4_DELEG_STID:
6480 spin_lock(&s->sc_file->fi_lock);
6481 ret = nfsd_file_get(s->sc_file->fi_deleg_file);
6482 spin_unlock(&s->sc_file->fi_lock);
6483 break;
6484 case NFS4_OPEN_STID:
6485 case NFS4_LOCK_STID:
6486 if (flags & RD_STATE)
6487 ret = find_readable_file(s->sc_file);
6488 else
6489 ret = find_writeable_file(s->sc_file);
6490 }
6491
6492 return ret;
6493 }
6494
6495 static __be32
6496 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6497 {
6498 __be32 status;
6499
6500 status = nfsd4_check_openowner_confirmed(ols);
6501 if (status)
6502 return status;
6503 return nfs4_check_openmode(ols, flags);
6504 }
6505
6506 static __be32
6507 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6508 struct nfsd_file **nfp, int flags)
6509 {
6510 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6511 struct nfsd_file *nf;
6512 __be32 status;
6513
6514 nf = nfs4_find_file(s, flags);
6515 if (nf) {
6516 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6517 acc | NFSD_MAY_OWNER_OVERRIDE);
6518 if (status) {
6519 nfsd_file_put(nf);
6520 goto out;
6521 }
6522 } else {
6523 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6524 if (status)
6525 return status;
6526 }
6527 *nfp = nf;
6528 out:
6529 return status;
6530 }
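/*
 * NFSD_MAY_OWNER_OVERRIDE is added above because NFSv4 checks
 * permission at OPEN time; a client presenting a valid stateid has
 * already passed that check, so the owner is allowed to keep doing
 * I/O even if the mode bits would now deny it.
 */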
6531 static void
6532 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6533 {
6534 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6535 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6536 return;
6537 list_del(&cps->cp_list);
6538 idr_remove(&nn->s2s_cp_stateids,
6539 cps->cp_stateid.cs_stid.si_opaque.so_id);
6540 kfree(cps);
6541 }
6542 /*
6543 * A READ from an inter-server-to-server COPY will have a
6544 * copy stateid. Look up the copy notify stateid from the
6545 * idr structure and take a reference on it.
6546 */
6547 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6548 struct nfs4_client *clp,
6549 struct nfs4_cpntf_state **cps)
6550 {
6551 copy_stateid_t *cps_t;
6552 struct nfs4_cpntf_state *state = NULL;
6553
6554 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6555 return nfserr_bad_stateid;
6556 spin_lock(&nn->s2s_cp_lock);
6557 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6558 if (cps_t) {
6559 state = container_of(cps_t, struct nfs4_cpntf_state,
6560 cp_stateid);
6561 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6562 state = NULL;
6563 goto unlock;
6564 }
6565 if (!clp)
6566 refcount_inc(&state->cp_stateid.cs_count);
6567 else
6568 _free_cpntf_state_locked(nn, state);
6569 }
6570 unlock:
6571 spin_unlock(&nn->s2s_cp_lock);
6572 if (!state)
6573 return nfserr_bad_stateid;
6574 if (!clp && state)
6575 *cps = state;
6576 return 0;
6577 }
6578
6579 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6580 struct nfs4_stid **stid)
6581 {
6582 __be32 status;
6583 struct nfs4_cpntf_state *cps = NULL;
6584 struct nfs4_client *found;
6585
6586 status = manage_cpntf_state(nn, st, NULL, &cps);
6587 if (status)
6588 return status;
6589
6590 cps->cpntf_time = ktime_get_boottime_seconds();
6591
6592 status = nfserr_expired;
6593 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6594 if (!found)
6595 goto out;
6596
6597 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6598 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6599 if (*stid)
6600 status = nfs_ok;
6601 else
6602 status = nfserr_bad_stateid;
6603
6604 put_client_renew(found);
6605 out:
6606 nfs4_put_cpntf_state(nn, cps);
6607 return status;
6608 }
6609
6610 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6611 {
6612 spin_lock(&nn->s2s_cp_lock);
6613 _free_cpntf_state_locked(nn, cps);
6614 spin_unlock(&nn->s2s_cp_lock);
6615 }
6616
6617 /**
6618 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
6619 * @rqstp: incoming request from client
6620 * @cstate: current compound state
6621 * @fhp: filehandle associated with requested stateid
6622 * @stateid: stateid (provided by client)
6623 * @flags: flags describing type of operation to be done
6624 * @nfp: optional nfsd_file return pointer (may be NULL)
6625 * @cstid: optional returned nfs4_stid pointer (may be NULL)
6626 *
6627 * Given info from the client, look up a nfs4_stid for the operation. On
6628 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
6629 * associated with it.
6630 */
6631 __be32
6632 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6633 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6634 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6635 struct nfs4_stid **cstid)
6636 {
6637 struct net *net = SVC_NET(rqstp);
6638 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6639 struct nfs4_stid *s = NULL;
6640 __be32 status;
6641
6642 if (nfp)
6643 *nfp = NULL;
6644
6645 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6646 if (cstid)
6647 status = nfserr_bad_stateid;
6648 else
6649 status = check_special_stateids(net, fhp, stateid,
6650 flags);
6651 goto done;
6652 }
6653
6654 status = nfsd4_lookup_stateid(cstate, stateid,
6655 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6656 &s, nn);
6657 if (status == nfserr_bad_stateid)
6658 status = find_cpntf_state(nn, stateid, &s);
6659 if (status)
6660 return status;
6661 status = nfsd4_stid_check_stateid_generation(stateid, s,
6662 nfsd4_has_session(cstate));
6663 if (status)
6664 goto out;
6665
6666 switch (s->sc_type) {
6667 case NFS4_DELEG_STID:
6668 status = nfs4_check_delegmode(delegstateid(s), flags);
6669 break;
6670 case NFS4_OPEN_STID:
6671 case NFS4_LOCK_STID:
6672 status = nfs4_check_olstateid(openlockstateid(s), flags);
6673 break;
6674 default:
6675 status = nfserr_bad_stateid;
6676 break;
6677 }
6678 if (status)
6679 goto out;
6680 status = nfs4_check_fh(fhp, s);
6681
6682 done:
6683 if (status == nfs_ok && nfp)
6684 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6685 out:
6686 if (s) {
6687 if (!status && cstid)
6688 *cstid = s;
6689 else
6690 nfs4_put_stid(s);
6691 }
6692 return status;
6693 }
6694
6695 /*
6696 * Test if the stateid is valid
6697 */
6698 __be32
6699 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6700 union nfsd4_op_u *u)
6701 {
6702 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6703 struct nfsd4_test_stateid_id *stateid;
6704 struct nfs4_client *cl = cstate->clp;
6705
6706 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6707 stateid->ts_id_status =
6708 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6709
6710 return nfs_ok;
6711 }
6712
6713 static __be32
6714 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6715 {
6716 struct nfs4_ol_stateid *stp = openlockstateid(s);
6717 __be32 ret;
6718
6719 ret = nfsd4_lock_ol_stateid(stp);
6720 if (ret)
6721 goto out_put_stid;
6722
6723 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6724 if (ret)
6725 goto out;
6726
6727 ret = nfserr_locks_held;
6728 if (check_for_locks(stp->st_stid.sc_file,
6729 lockowner(stp->st_stateowner)))
6730 goto out;
6731
6732 release_lock_stateid(stp);
6733 ret = nfs_ok;
6734
6735 out:
6736 mutex_unlock(&stp->st_mutex);
6737 out_put_stid:
6738 nfs4_put_stid(s);
6739 return ret;
6740 }
6741
6742 __be32
6743 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6744 union nfsd4_op_u *u)
6745 {
6746 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6747 stateid_t *stateid = &free_stateid->fr_stateid;
6748 struct nfs4_stid *s;
6749 struct nfs4_delegation *dp;
6750 struct nfs4_client *cl = cstate->clp;
6751 __be32 ret = nfserr_bad_stateid;
6752
6753 spin_lock(&cl->cl_lock);
6754 s = find_stateid_locked(cl, stateid);
6755 if (!s)
6756 goto out_unlock;
6757 spin_lock(&s->sc_lock);
6758 switch (s->sc_type) {
6759 case NFS4_DELEG_STID:
6760 ret = nfserr_locks_held;
6761 break;
6762 case NFS4_OPEN_STID:
6763 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6764 if (ret)
6765 break;
6766 ret = nfserr_locks_held;
6767 break;
6768 case NFS4_LOCK_STID:
6769 spin_unlock(&s->sc_lock);
6770 refcount_inc(&s->sc_count);
6771 spin_unlock(&cl->cl_lock);
6772 ret = nfsd4_free_lock_stateid(stateid, s);
6773 goto out;
6774 case NFS4_REVOKED_DELEG_STID:
6775 spin_unlock(&s->sc_lock);
6776 dp = delegstateid(s);
6777 list_del_init(&dp->dl_recall_lru);
6778 spin_unlock(&cl->cl_lock);
6779 nfs4_put_stid(s);
6780 ret = nfs_ok;
6781 goto out;
6782 /* Default falls through and returns nfserr_bad_stateid */
6783 }
6784 spin_unlock(&s->sc_lock);
6785 out_unlock:
6786 spin_unlock(&cl->cl_lock);
6787 out:
6788 return ret;
6789 }
6790
6791 static inline int
6792 setlkflg(int type)
6793 {
6794 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6795 RD_STATE : WR_STATE;
6796 }
6797
6798 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6799 {
6800 struct svc_fh *current_fh = &cstate->current_fh;
6801 struct nfs4_stateowner *sop = stp->st_stateowner;
6802 __be32 status;
6803
6804 status = nfsd4_check_seqid(cstate, sop, seqid);
6805 if (status)
6806 return status;
6807 status = nfsd4_lock_ol_stateid(stp);
6808 if (status != nfs_ok)
6809 return status;
6810 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6811 if (status == nfs_ok)
6812 status = nfs4_check_fh(current_fh, &stp->st_stid);
6813 if (status != nfs_ok)
6814 mutex_unlock(&stp->st_mutex);
6815 return status;
6816 }
6817
6818 /**
6819 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
6820 * @cstate: compound state
6821 * @seqid: seqid (provided by client)
6822 * @stateid: stateid (provided by client)
6823 * @typemask: mask of allowable types for this operation
6824 * @stpp: return pointer for the stateid found
6825 * @nn: net namespace for request
6826 *
6827 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
6828 * return it in @stpp. On a nfs_ok return, the returned stateid will
6829 * have its st_mutex locked.
6830 */
6831 static __be32
6832 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6833 stateid_t *stateid, char typemask,
6834 struct nfs4_ol_stateid **stpp,
6835 struct nfsd_net *nn)
6836 {
6837 __be32 status;
6838 struct nfs4_stid *s;
6839 struct nfs4_ol_stateid *stp = NULL;
6840
6841 trace_nfsd_preprocess(seqid, stateid);
6842
6843 *stpp = NULL;
6844 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6845 if (status)
6846 return status;
6847 stp = openlockstateid(s);
6848 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6849
6850 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6851 if (!status)
6852 *stpp = stp;
6853 else
6854 nfs4_put_stid(&stp->st_stid);
6855 return status;
6856 }
6857
6858 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6859 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6860 {
6861 __be32 status;
6862 struct nfs4_openowner *oo;
6863 struct nfs4_ol_stateid *stp;
6864
6865 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6866 NFS4_OPEN_STID, &stp, nn);
6867 if (status)
6868 return status;
6869 oo = openowner(stp->st_stateowner);
6870 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6871 mutex_unlock(&stp->st_mutex);
6872 nfs4_put_stid(&stp->st_stid);
6873 return nfserr_bad_stateid;
6874 }
6875 *stpp = stp;
6876 return nfs_ok;
6877 }
6878
6879 __be32
6880 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6881 union nfsd4_op_u *u)
6882 {
6883 struct nfsd4_open_confirm *oc = &u->open_confirm;
6884 __be32 status;
6885 struct nfs4_openowner *oo;
6886 struct nfs4_ol_stateid *stp;
6887 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6888
6889 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6890 cstate->current_fh.fh_dentry);
6891
6892 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6893 if (status)
6894 return status;
6895
6896 status = nfs4_preprocess_seqid_op(cstate,
6897 oc->oc_seqid, &oc->oc_req_stateid,
6898 NFS4_OPEN_STID, &stp, nn);
6899 if (status)
6900 goto out;
6901 oo = openowner(stp->st_stateowner);
6902 status = nfserr_bad_stateid;
6903 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6904 mutex_unlock(&stp->st_mutex);
6905 goto put_stateid;
6906 }
6907 oo->oo_flags |= NFS4_OO_CONFIRMED;
6908 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6909 mutex_unlock(&stp->st_mutex);
6910 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6911 nfsd4_client_record_create(oo->oo_owner.so_client);
6912 status = nfs_ok;
6913 put_stateid:
6914 nfs4_put_stid(&stp->st_stid);
6915 out:
6916 nfsd4_bump_seqid(cstate, status);
6917 return status;
6918 }
6919
6920 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6921 {
6922 if (!test_access(access, stp))
6923 return;
6924 nfs4_file_put_access(stp->st_stid.sc_file, access);
6925 clear_access(access, stp);
6926 }
6927
6928 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6929 {
6930 switch (to_access) {
6931 case NFS4_SHARE_ACCESS_READ:
6932 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6933 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6934 break;
6935 case NFS4_SHARE_ACCESS_WRITE:
6936 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6937 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6938 break;
6939 case NFS4_SHARE_ACCESS_BOTH:
6940 break;
6941 default:
6942 WARN_ON_ONCE(1);
6943 }
6944 }
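
/*
 * Example (illustrative, derived from the code above): a stateid that is
 * downgraded to NFS4_SHARE_ACCESS_READ releases the file-access
 * references for WRITE and BOTH via nfs4_stateid_downgrade_bit() and
 * clears those bits in st_access_bmap; the READ bit, which the caller
 * has already verified with test_access(), is left intact.
 */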
6945
6946 __be32
6947 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6948 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6949 {
6950 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6951 __be32 status;
6952 struct nfs4_ol_stateid *stp;
6953 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6954
6955 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6956 cstate->current_fh.fh_dentry);
6957
6958 /* We don't yet support WANT bits: */
6959 if (od->od_deleg_want)
6960 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6961 od->od_deleg_want);
6962
6963 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6964 &od->od_stateid, &stp, nn);
6965 if (status)
6966 goto out;
6967 status = nfserr_inval;
6968 if (!test_access(od->od_share_access, stp)) {
6969 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6970 stp->st_access_bmap, od->od_share_access);
6971 goto put_stateid;
6972 }
6973 if (!test_deny(od->od_share_deny, stp)) {
6974 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6975 stp->st_deny_bmap, od->od_share_deny);
6976 goto put_stateid;
6977 }
6978 nfs4_stateid_downgrade(stp, od->od_share_access);
6979 reset_union_bmap_deny(od->od_share_deny, stp);
6980 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6981 status = nfs_ok;
6982 put_stateid:
6983 mutex_unlock(&stp->st_mutex);
6984 nfs4_put_stid(&stp->st_stid);
6985 out:
6986 nfsd4_bump_seqid(cstate, status);
6987 return status;
6988 }
6989
6990 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6991 {
6992 struct nfs4_client *clp = s->st_stid.sc_client;
6993 bool unhashed;
6994 LIST_HEAD(reaplist);
6995 struct nfs4_ol_stateid *stp;
6996
6997 spin_lock(&clp->cl_lock);
6998 unhashed = unhash_open_stateid(s, &reaplist);
6999
7000 if (clp->cl_minorversion) {
7001 if (unhashed)
7002 put_ol_stateid_locked(s, &reaplist);
7003 spin_unlock(&clp->cl_lock);
7004 list_for_each_entry(stp, &reaplist, st_locks)
7005 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7006 free_ol_stateid_reaplist(&reaplist);
7007 } else {
7008 spin_unlock(&clp->cl_lock);
7009 free_ol_stateid_reaplist(&reaplist);
7010 if (unhashed)
7011 move_to_close_lru(s, clp->net);
7012 }
7013 }
7014
7015 /*
7016 * nfs4_unlock_state() called after encode
7017 */
7018 __be32
7019 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7020 union nfsd4_op_u *u)
7021 {
7022 struct nfsd4_close *close = &u->close;
7023 __be32 status;
7024 struct nfs4_ol_stateid *stp;
7025 struct net *net = SVC_NET(rqstp);
7026 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7027
7028 dprintk("NFSD: nfsd4_close on file %pd\n",
7029 cstate->current_fh.fh_dentry);
7030
7031 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
7032 &close->cl_stateid,
7033 NFS4_OPEN_STID|NFS4_CLOSED_STID,
7034 &stp, nn);
7035 nfsd4_bump_seqid(cstate, status);
7036 if (status)
7037 goto out;
7038
7039 stp->st_stid.sc_type = NFS4_CLOSED_STID;
7040
7041 /*
7042 * Technically we don't _really_ have to increment or copy it, since
7043 * it should just be gone after this operation and we clobber the
7044 * copied value below, but we continue to do so here just to ensure
7045 * that racing ops see that there was a state change.
7046 */
7047 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
7048
7049 nfsd4_close_open_stateid(stp);
7050 mutex_unlock(&stp->st_mutex);
7051
7052 /* v4.1+ suggests that we send a special stateid in here, since the
7053 * clients should just ignore this anyway. Since this is not useful
7054 * for v4.0 clients either, we set it to the special close_stateid
7055 * universally.
7056 *
7057 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
7058 */
7059 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
7060
7061 /* put reference from nfs4_preprocess_seqid_op */
7062 nfs4_put_stid(&stp->st_stid);
7063 out:
7064 return status;
7065 }
7066
7067 __be32
7068 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7069 union nfsd4_op_u *u)
7070 {
7071 struct nfsd4_delegreturn *dr = &u->delegreturn;
7072 struct nfs4_delegation *dp;
7073 stateid_t *stateid = &dr->dr_stateid;
7074 struct nfs4_stid *s;
7075 __be32 status;
7076 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7077
7078 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7079 return status;
7080
7081 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
7082 if (status)
7083 goto out;
7084 dp = delegstateid(s);
7085 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
7086 if (status)
7087 goto put_stateid;
7088
7089 trace_nfsd_deleg_return(stateid);
7090 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7091 destroy_delegation(dp);
7092 put_stateid:
7093 nfs4_put_stid(&dp->dl_stid);
7094 out:
7095 return status;
7096 }
7097
7098 /* last octet in a range */
7099 static inline u64
7100 last_byte_offset(u64 start, u64 len)
7101 {
7102 u64 end;
7103
7104 WARN_ON_ONCE(!len);
7105 end = start + len;
7106 return end > start ? end - 1 : NFS4_MAX_UINT64;
7107 }
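
/*
 * Worked examples (not in the original source): last_byte_offset(100, 10)
 * covers bytes 100..109 and returns 109.  last_byte_offset(10,
 * NFS4_MAX_UINT64) wraps the u64 addition (end becomes 9, which is not
 * greater than start), so it returns NFS4_MAX_UINT64, the protocol's
 * "to end of file" value.
 */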
7108
7109 /*
7110 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7111 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7112 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7113 * locking, this prevents us from being completely protocol-compliant. The
7114 * real solution to this problem is to start using unsigned file offsets in
7115 * the VFS, but this is a very deep change!
7116 */
7117 static inline void
7118 nfs4_transform_lock_offset(struct file_lock *lock)
7119 {
7120 if (lock->fl_start < 0)
7121 lock->fl_start = OFFSET_MAX;
7122 if (lock->fl_end < 0)
7123 lock->fl_end = OFFSET_MAX;
7124 }
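
/*
 * Example: a whole-file LOCK (offset 0, length NFS4_MAX_UINT64) yields
 * fl_end = NFS4_MAX_UINT64 - 1 from last_byte_offset(), which reads as
 * -2 when interpreted as a signed loff_t; the clamp above rewrites any
 * such negative offset to OFFSET_MAX, the largest offset the VFS can
 * represent.
 */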
7125
7126 static fl_owner_t
7127 nfsd4_lm_get_owner(fl_owner_t owner)
7128 {
7129 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7130
7131 nfs4_get_stateowner(&lo->lo_owner);
7132 return owner;
7133 }
7134
7135 static void
7136 nfsd4_lm_put_owner(fl_owner_t owner)
7137 {
7138 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7139
7140 if (lo)
7141 nfs4_put_stateowner(&lo->lo_owner);
7142 }
7143
7144 /* return true if the client holding this lock is expirable */
7145 static bool
7146 nfsd4_lm_lock_expirable(struct file_lock *cfl)
7147 {
7148 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
7149 struct nfs4_client *clp = lo->lo_owner.so_client;
7150 struct nfsd_net *nn;
7151
7152 if (try_to_expire_client(clp)) {
7153 nn = net_generic(clp->net, nfsd_net_id);
7154 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7155 return true;
7156 }
7157 return false;
7158 }
7159
7160 /* wait for the laundromat run scheduled by nfsd4_lm_lock_expirable() to complete */
7161 static void
7162 nfsd4_lm_expire_lock(void)
7163 {
7164 flush_workqueue(laundry_wq);
7165 }
7166
7167 static void
7168 nfsd4_lm_notify(struct file_lock *fl)
7169 {
7170 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
7171 struct net *net = lo->lo_owner.so_client->net;
7172 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7173 struct nfsd4_blocked_lock *nbl = container_of(fl,
7174 struct nfsd4_blocked_lock, nbl_lock);
7175 bool queue = false;
7176
7177 /* An empty list means that something else is going to be using it */
7178 spin_lock(&nn->blocked_locks_lock);
7179 if (!list_empty(&nbl->nbl_list)) {
7180 list_del_init(&nbl->nbl_list);
7181 list_del_init(&nbl->nbl_lru);
7182 queue = true;
7183 }
7184 spin_unlock(&nn->blocked_locks_lock);
7185
7186 if (queue) {
7187 trace_nfsd_cb_notify_lock(lo, nbl);
7188 nfsd4_run_cb(&nbl->nbl_cb);
7189 }
7190 }
7191
7192 static const struct lock_manager_operations nfsd_posix_mng_ops = {
7193 .lm_mod_owner = THIS_MODULE,
7194 .lm_notify = nfsd4_lm_notify,
7195 .lm_get_owner = nfsd4_lm_get_owner,
7196 .lm_put_owner = nfsd4_lm_put_owner,
7197 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7198 .lm_expire_lock = nfsd4_lm_expire_lock,
7199 };
7200
7201 static inline void
7202 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7203 {
7204 struct nfs4_lockowner *lo;
7205
7206 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7207 lo = (struct nfs4_lockowner *) fl->fl_owner;
7208 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7209 GFP_KERNEL);
7210 if (!deny->ld_owner.data)
7211 /* We just don't care that much */
7212 goto nevermind;
7213 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7214 } else {
7215 nevermind:
7216 deny->ld_owner.len = 0;
7217 deny->ld_owner.data = NULL;
7218 deny->ld_clientid.cl_boot = 0;
7219 deny->ld_clientid.cl_id = 0;
7220 }
7221 deny->ld_start = fl->fl_start;
7222 deny->ld_length = NFS4_MAX_UINT64;
7223 if (fl->fl_end != NFS4_MAX_UINT64)
7224 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7225 deny->ld_type = NFS4_READ_LT;
7226 if (fl->fl_type != F_RDLCK)
7227 deny->ld_type = NFS4_WRITE_LT;
7228 }
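
/*
 * Example encoding (illustrative): a conflicting POSIX write lock over
 * bytes 0..99 (fl_start = 0, fl_end = 99, fl_type = F_WRLCK) is reported
 * to the client as ld_start = 0, ld_length = 100, ld_type = NFS4_WRITE_LT.
 */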
7229
7230 static struct nfs4_lockowner *
7231 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7232 {
7233 unsigned int strhashval = ownerstr_hashval(owner);
7234 struct nfs4_stateowner *so;
7235
7236 lockdep_assert_held(&clp->cl_lock);
7237
7238 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7239 so_strhash) {
7240 if (so->so_is_open_owner)
7241 continue;
7242 if (same_owner_str(so, owner))
7243 return lockowner(nfs4_get_stateowner(so));
7244 }
7245 return NULL;
7246 }
7247
7248 static struct nfs4_lockowner *
7249 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7250 {
7251 struct nfs4_lockowner *lo;
7252
7253 spin_lock(&clp->cl_lock);
7254 lo = find_lockowner_str_locked(clp, owner);
7255 spin_unlock(&clp->cl_lock);
7256 return lo;
7257 }
7258
7259 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7260 {
7261 unhash_lockowner_locked(lockowner(sop));
7262 }
7263
7264 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7265 {
7266 struct nfs4_lockowner *lo = lockowner(sop);
7267
7268 kmem_cache_free(lockowner_slab, lo);
7269 }
7270
7271 static const struct nfs4_stateowner_operations lockowner_ops = {
7272 .so_unhash = nfs4_unhash_lockowner,
7273 .so_free = nfs4_free_lockowner,
7274 };
7275
7276 /*
7277 * Alloc a lock owner structure.
7278 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
7279 * occurred.
7280 *
7281 * strhashval = ownerstr_hashval
7282 */
7283 static struct nfs4_lockowner *
7284 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7285 struct nfs4_ol_stateid *open_stp,
7286 struct nfsd4_lock *lock)
7287 {
7288 struct nfs4_lockowner *lo, *ret;
7289
7290 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7291 if (!lo)
7292 return NULL;
7293 INIT_LIST_HEAD(&lo->lo_blocked);
7294 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7295 lo->lo_owner.so_is_open_owner = 0;
7296 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7297 lo->lo_owner.so_ops = &lockowner_ops;
7298 spin_lock(&clp->cl_lock);
7299 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7300 if (ret == NULL) {
7301 list_add(&lo->lo_owner.so_strhash,
7302 &clp->cl_ownerstr_hashtbl[strhashval]);
7303 ret = lo;
7304 } else
7305 nfs4_free_stateowner(&lo->lo_owner);
7306
7307 spin_unlock(&clp->cl_lock);
7308 return ret;
7309 }
7310
7311 static struct nfs4_ol_stateid *
7312 find_lock_stateid(const struct nfs4_lockowner *lo,
7313 const struct nfs4_ol_stateid *ost)
7314 {
7315 struct nfs4_ol_stateid *lst;
7316
7317 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7318
7319 /* If ost is not hashed, ost->st_locks will not be valid */
7320 if (!nfs4_ol_stateid_unhashed(ost))
7321 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7322 if (lst->st_stateowner == &lo->lo_owner) {
7323 refcount_inc(&lst->st_stid.sc_count);
7324 return lst;
7325 }
7326 }
7327 return NULL;
7328 }
7329
7330 static struct nfs4_ol_stateid *
7331 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7332 struct nfs4_file *fp, struct inode *inode,
7333 struct nfs4_ol_stateid *open_stp)
7334 {
7335 struct nfs4_client *clp = lo->lo_owner.so_client;
7336 struct nfs4_ol_stateid *retstp;
7337
7338 mutex_init(&stp->st_mutex);
7339 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7340 retry:
7341 spin_lock(&clp->cl_lock);
7342 if (nfs4_ol_stateid_unhashed(open_stp))
7343 goto out_close;
7344 retstp = find_lock_stateid(lo, open_stp);
7345 if (retstp)
7346 goto out_found;
7347 refcount_inc(&stp->st_stid.sc_count);
7348 stp->st_stid.sc_type = NFS4_LOCK_STID;
7349 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7350 get_nfs4_file(fp);
7351 stp->st_stid.sc_file = fp;
7352 stp->st_access_bmap = 0;
7353 stp->st_deny_bmap = open_stp->st_deny_bmap;
7354 stp->st_openstp = open_stp;
7355 spin_lock(&fp->fi_lock);
7356 list_add(&stp->st_locks, &open_stp->st_locks);
7357 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7358 list_add(&stp->st_perfile, &fp->fi_stateids);
7359 spin_unlock(&fp->fi_lock);
7360 spin_unlock(&clp->cl_lock);
7361 return stp;
7362 out_found:
7363 spin_unlock(&clp->cl_lock);
7364 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7365 nfs4_put_stid(&retstp->st_stid);
7366 goto retry;
7367 }
7368 /* To keep mutex tracking happy */
7369 mutex_unlock(&stp->st_mutex);
7370 return retstp;
7371 out_close:
7372 spin_unlock(&clp->cl_lock);
7373 mutex_unlock(&stp->st_mutex);
7374 return NULL;
7375 }
7376
7377 static struct nfs4_ol_stateid *
7378 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7379 struct inode *inode, struct nfs4_ol_stateid *ost,
7380 bool *new)
7381 {
7382 struct nfs4_stid *ns = NULL;
7383 struct nfs4_ol_stateid *lst;
7384 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7385 struct nfs4_client *clp = oo->oo_owner.so_client;
7386
7387 *new = false;
7388 spin_lock(&clp->cl_lock);
7389 lst = find_lock_stateid(lo, ost);
7390 spin_unlock(&clp->cl_lock);
7391 if (lst != NULL) {
7392 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7393 goto out;
7394 nfs4_put_stid(&lst->st_stid);
7395 }
7396 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7397 if (ns == NULL)
7398 return NULL;
7399
7400 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7401 if (lst == openlockstateid(ns))
7402 *new = true;
7403 else
7404 nfs4_put_stid(ns);
7405 out:
7406 return lst;
7407 }
7408
7409 static int
7410 check_lock_length(u64 offset, u64 length)
7411 {
7412 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7413 (length > ~offset)));
7414 }
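
/*
 * The "length > ~offset" test catches u64 wraparound: ~offset equals
 * NFS4_MAX_UINT64 - offset, so any range whose last byte would lie past
 * offset NFS4_MAX_UINT64 - 1 is rejected.  Worked examples (not in the
 * original source):
 *
 *	check_lock_length(0, 0)                    -> invalid (zero length)
 *	check_lock_length(10, NFS4_MAX_UINT64)     -> valid (to end of file)
 *	check_lock_length(NFS4_MAX_UINT64 - 1, 2)  -> invalid (would wrap)
 */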
7415
7416 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7417 {
7418 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7419
7420 lockdep_assert_held(&fp->fi_lock);
7421
7422 if (test_access(access, lock_stp))
7423 return;
7424 __nfs4_file_get_access(fp, access);
7425 set_access(access, lock_stp);
7426 }
7427
7428 static __be32
7429 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7430 struct nfs4_ol_stateid *ost,
7431 struct nfsd4_lock *lock,
7432 struct nfs4_ol_stateid **plst, bool *new)
7433 {
7434 __be32 status;
7435 struct nfs4_file *fi = ost->st_stid.sc_file;
7436 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7437 struct nfs4_client *cl = oo->oo_owner.so_client;
7438 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7439 struct nfs4_lockowner *lo;
7440 struct nfs4_ol_stateid *lst;
7441 unsigned int strhashval;
7442
7443 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7444 if (!lo) {
7445 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7446 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7447 if (lo == NULL)
7448 return nfserr_jukebox;
7449 } else {
7450 /* with an existing lockowner, seqids must be the same */
7451 status = nfserr_bad_seqid;
7452 if (!cstate->minorversion &&
7453 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7454 goto out;
7455 }
7456
7457 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7458 if (lst == NULL) {
7459 status = nfserr_jukebox;
7460 goto out;
7461 }
7462
7463 status = nfs_ok;
7464 *plst = lst;
7465 out:
7466 nfs4_put_stateowner(&lo->lo_owner);
7467 return status;
7468 }
7469
7470 /*
7471 * LOCK operation
7472 */
7473 __be32
7474 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7475 union nfsd4_op_u *u)
7476 {
7477 struct nfsd4_lock *lock = &u->lock;
7478 struct nfs4_openowner *open_sop = NULL;
7479 struct nfs4_lockowner *lock_sop = NULL;
7480 struct nfs4_ol_stateid *lock_stp = NULL;
7481 struct nfs4_ol_stateid *open_stp = NULL;
7482 struct nfs4_file *fp;
7483 struct nfsd_file *nf = NULL;
7484 struct nfsd4_blocked_lock *nbl = NULL;
7485 struct file_lock *file_lock = NULL;
7486 struct file_lock *conflock = NULL;
7487 __be32 status = 0;
7488 int lkflg;
7489 int err;
7490 bool new = false;
7491 unsigned char fl_type;
7492 unsigned int fl_flags = FL_POSIX;
7493 struct net *net = SVC_NET(rqstp);
7494 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7495
7496 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7497 (long long) lock->lk_offset,
7498 (long long) lock->lk_length);
7499
7500 if (check_lock_length(lock->lk_offset, lock->lk_length))
7501 return nfserr_inval;
7502
7503 if ((status = fh_verify(rqstp, &cstate->current_fh,
7504 S_IFREG, NFSD_MAY_LOCK))) {
7505 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7506 return status;
7507 }
7508
7509 if (lock->lk_is_new) {
7510 if (nfsd4_has_session(cstate))
7511 /* See RFC 5661, section 18.10.3: given clientid is ignored: */
7512 memcpy(&lock->lk_new_clientid,
7513 &cstate->clp->cl_clientid,
7514 sizeof(clientid_t));
7515
7516 /* validate and update open stateid and open seqid */
7517 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7518 lock->lk_new_open_seqid,
7519 &lock->lk_new_open_stateid,
7520 &open_stp, nn);
7521 if (status)
7522 goto out;
7523 mutex_unlock(&open_stp->st_mutex);
7524 open_sop = openowner(open_stp->st_stateowner);
7525 status = nfserr_bad_stateid;
7526 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7527 &lock->lk_new_clientid))
7528 goto out;
7529 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7530 &lock_stp, &new);
7531 } else {
7532 status = nfs4_preprocess_seqid_op(cstate,
7533 lock->lk_old_lock_seqid,
7534 &lock->lk_old_lock_stateid,
7535 NFS4_LOCK_STID, &lock_stp, nn);
7536 }
7537 if (status)
7538 goto out;
7539 lock_sop = lockowner(lock_stp->st_stateowner);
7540
7541 lkflg = setlkflg(lock->lk_type);
7542 status = nfs4_check_openmode(lock_stp, lkflg);
7543 if (status)
7544 goto out;
7545
7546 status = nfserr_grace;
7547 if (locks_in_grace(net) && !lock->lk_reclaim)
7548 goto out;
7549 status = nfserr_no_grace;
7550 if (!locks_in_grace(net) && lock->lk_reclaim)
7551 goto out;
7552
7553 if (lock->lk_reclaim)
7554 fl_flags |= FL_RECLAIM;
7555
7556 fp = lock_stp->st_stid.sc_file;
7557 switch (lock->lk_type) {
7558 case NFS4_READW_LT:
7559 if (nfsd4_has_session(cstate))
7560 fl_flags |= FL_SLEEP;
7561 fallthrough;
7562 case NFS4_READ_LT:
7563 spin_lock(&fp->fi_lock);
7564 nf = find_readable_file_locked(fp);
7565 if (nf)
7566 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7567 spin_unlock(&fp->fi_lock);
7568 fl_type = F_RDLCK;
7569 break;
7570 case NFS4_WRITEW_LT:
7571 if (nfsd4_has_session(cstate))
7572 fl_flags |= FL_SLEEP;
7573 fallthrough;
7574 case NFS4_WRITE_LT:
7575 spin_lock(&fp->fi_lock);
7576 nf = find_writeable_file_locked(fp);
7577 if (nf)
7578 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7579 spin_unlock(&fp->fi_lock);
7580 fl_type = F_WRLCK;
7581 break;
7582 default:
7583 status = nfserr_inval;
7584 goto out;
7585 }
7586
7587 if (!nf) {
7588 status = nfserr_openmode;
7589 goto out;
7590 }
7591
7592 /*
7593 * Most filesystems with their own ->lock operations will block
7594 * the nfsd thread waiting to acquire the lock. That leads to
7595 * deadlocks (we don't want every nfsd thread tied up waiting
7596 * for file locks), so don't attempt blocking lock notifications
7597 * on those filesystems:
7598 */
7599 if (nf->nf_file->f_op->lock)
7600 fl_flags &= ~FL_SLEEP;
7601
7602 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7603 if (!nbl) {
7604 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7605 status = nfserr_jukebox;
7606 goto out;
7607 }
7608
7609 file_lock = &nbl->nbl_lock;
7610 file_lock->fl_type = fl_type;
7611 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7612 file_lock->fl_pid = current->tgid;
7613 file_lock->fl_file = nf->nf_file;
7614 file_lock->fl_flags = fl_flags;
7615 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7616 file_lock->fl_start = lock->lk_offset;
7617 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7618 nfs4_transform_lock_offset(file_lock);
7619
7620 conflock = locks_alloc_lock();
7621 if (!conflock) {
7622 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7623 status = nfserr_jukebox;
7624 goto out;
7625 }
7626
7627 if (fl_flags & FL_SLEEP) {
7628 nbl->nbl_time = ktime_get_boottime_seconds();
7629 spin_lock(&nn->blocked_locks_lock);
7630 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7631 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
7632 kref_get(&nbl->nbl_kref);
7633 spin_unlock(&nn->blocked_locks_lock);
7634 }
7635
7636 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
7637 switch (err) {
7638 case 0: /* success! */
7639 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
7640 status = 0;
7641 if (lock->lk_reclaim)
7642 nn->somebody_reclaimed = true;
7643 break;
7644 case FILE_LOCK_DEFERRED:
7645 kref_put(&nbl->nbl_kref, free_nbl);
7646 nbl = NULL;
7647 fallthrough;
7648 case -EAGAIN: /* conflock holds conflicting lock */
7649 status = nfserr_denied;
7650 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7651 nfs4_set_lock_denied(conflock, &lock->lk_denied);
7652 break;
7653 case -EDEADLK:
7654 status = nfserr_deadlock;
7655 break;
7656 default:
7657 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7658 status = nfserrno(err);
7659 break;
7660 }
7661 out:
7662 if (nbl) {
7663 /* dequeue it if we queued it before */
7664 if (fl_flags & FL_SLEEP) {
7665 spin_lock(&nn->blocked_locks_lock);
7666 if (!list_empty(&nbl->nbl_list) &&
7667 !list_empty(&nbl->nbl_lru)) {
7668 list_del_init(&nbl->nbl_list);
7669 list_del_init(&nbl->nbl_lru);
7670 kref_put(&nbl->nbl_kref, free_nbl);
7671 }
7672 /* nbl may be linked onto a reaplist via one of these list heads */
7673 spin_unlock(&nn->blocked_locks_lock);
7674 }
7675 free_blocked_lock(nbl);
7676 }
7677 if (nf)
7678 nfsd_file_put(nf);
7679 if (lock_stp) {
7680 /* Bump seqid manually if the 4.0 replay owner is openowner */
7681 if (cstate->replay_owner &&
7682 cstate->replay_owner != &lock_sop->lo_owner &&
7683 seqid_mutating_err(ntohl(status)))
7684 lock_sop->lo_owner.so_seqid++;
7685
7686 /*
7687 * If this is a new, never-before-used stateid, and we are
7688 * returning an error, then just go ahead and release it.
7689 */
7690 if (status && new)
7691 release_lock_stateid(lock_stp);
7692
7693 mutex_unlock(&lock_stp->st_mutex);
7694
7695 nfs4_put_stid(&lock_stp->st_stid);
7696 }
7697 if (open_stp)
7698 nfs4_put_stid(&open_stp->st_stid);
7699 nfsd4_bump_seqid(cstate, status);
7700 if (conflock)
7701 locks_free_lock(conflock);
7702 return status;
7703 }
7704
7705 /*
7706 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7707 * so we do a temporary open here just to get an open file to pass to
7708 * vfs_test_lock.
7709 */
7710 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7711 {
7712 struct nfsd_file *nf;
7713 struct inode *inode;
7714 __be32 err;
7715
7716 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7717 if (err)
7718 return err;
7719 inode = fhp->fh_dentry->d_inode;
7720 inode_lock(inode); /* to block new leases till after test_lock: */
7721 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
7722 if (err)
7723 goto out;
7724 lock->fl_file = nf->nf_file;
7725 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7726 lock->fl_file = NULL;
7727 out:
7728 inode_unlock(inode);
7729 nfsd_file_put(nf);
7730 return err;
7731 }
7732
7733 /*
7734 * LOCKT operation
7735 */
7736 __be32
7737 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7738 union nfsd4_op_u *u)
7739 {
7740 struct nfsd4_lockt *lockt = &u->lockt;
7741 struct file_lock *file_lock = NULL;
7742 struct nfs4_lockowner *lo = NULL;
7743 __be32 status;
7744 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7745
7746 if (locks_in_grace(SVC_NET(rqstp)))
7747 return nfserr_grace;
7748
7749 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7750 return nfserr_inval;
7751
7752 if (!nfsd4_has_session(cstate)) {
7753 status = set_client(&lockt->lt_clientid, cstate, nn);
7754 if (status)
7755 goto out;
7756 }
7757
7758 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7759 goto out;
7760
7761 file_lock = locks_alloc_lock();
7762 if (!file_lock) {
7763 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7764 status = nfserr_jukebox;
7765 goto out;
7766 }
7767
7768 switch (lockt->lt_type) {
7769 case NFS4_READ_LT:
7770 case NFS4_READW_LT:
7771 file_lock->fl_type = F_RDLCK;
7772 break;
7773 case NFS4_WRITE_LT:
7774 case NFS4_WRITEW_LT:
7775 file_lock->fl_type = F_WRLCK;
7776 break;
7777 default:
7778 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7779 status = nfserr_inval;
7780 goto out;
7781 }
7782
7783 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7784 if (lo)
7785 file_lock->fl_owner = (fl_owner_t)lo;
7786 file_lock->fl_pid = current->tgid;
7787 file_lock->fl_flags = FL_POSIX;
7788
7789 file_lock->fl_start = lockt->lt_offset;
7790 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7791
7792 nfs4_transform_lock_offset(file_lock);
7793
7794 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7795 if (status)
7796 goto out;
7797
7798 if (file_lock->fl_type != F_UNLCK) {
7799 status = nfserr_denied;
7800 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7801 }
7802 out:
7803 if (lo)
7804 nfs4_put_stateowner(&lo->lo_owner);
7805 if (file_lock)
7806 locks_free_lock(file_lock);
7807 return status;
7808 }
7809
7810 __be32
7811 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7812 union nfsd4_op_u *u)
7813 {
7814 struct nfsd4_locku *locku = &u->locku;
7815 struct nfs4_ol_stateid *stp;
7816 struct nfsd_file *nf = NULL;
7817 struct file_lock *file_lock = NULL;
7818 __be32 status;
7819 int err;
7820 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7821
7822 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7823 (long long) locku->lu_offset,
7824 (long long) locku->lu_length);
7825
7826 if (check_lock_length(locku->lu_offset, locku->lu_length))
7827 return nfserr_inval;
7828
7829 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7830 &locku->lu_stateid, NFS4_LOCK_STID,
7831 &stp, nn);
7832 if (status)
7833 goto out;
7834 nf = find_any_file(stp->st_stid.sc_file);
7835 if (!nf) {
7836 status = nfserr_lock_range;
7837 goto put_stateid;
7838 }
7839 file_lock = locks_alloc_lock();
7840 if (!file_lock) {
7841 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7842 status = nfserr_jukebox;
7843 goto put_file;
7844 }
7845
7846 file_lock->fl_type = F_UNLCK;
7847 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7848 file_lock->fl_pid = current->tgid;
7849 file_lock->fl_file = nf->nf_file;
7850 file_lock->fl_flags = FL_POSIX;
7851 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7852 file_lock->fl_start = locku->lu_offset;
7853
7854 file_lock->fl_end = last_byte_offset(locku->lu_offset,
7855 locku->lu_length);
7856 nfs4_transform_lock_offset(file_lock);
7857
7858 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7859 if (err) {
7860 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7861 goto out_nfserr;
7862 }
7863 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7864 put_file:
7865 nfsd_file_put(nf);
7866 put_stateid:
7867 mutex_unlock(&stp->st_mutex);
7868 nfs4_put_stid(&stp->st_stid);
7869 out:
7870 nfsd4_bump_seqid(cstate, status);
7871 if (file_lock)
7872 locks_free_lock(file_lock);
7873 return status;
7874
7875 out_nfserr:
7876 status = nfserrno(err);
7877 goto put_file;
7878 }
7879
7880 /*
7881 * returns
7882 * true: locks held by lockowner
7883 * false: no locks held by lockowner
7884 */
7885 static bool
7886 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7887 {
7888 struct file_lock *fl;
7889 int status = false;
7890 struct nfsd_file *nf;
7891 struct inode *inode;
7892 struct file_lock_context *flctx;
7893
7894 spin_lock(&fp->fi_lock);
7895 nf = find_any_file_locked(fp);
7896 if (!nf) {
7897 /* Any valid lock stateid should have some sort of access */
7898 WARN_ON_ONCE(1);
7899 goto out;
7900 }
7901
7902 inode = file_inode(nf->nf_file);
7903 flctx = locks_inode_context(inode);
7904
7905 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7906 spin_lock(&flctx->flc_lock);
7907 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7908 if (fl->fl_owner == (fl_owner_t)lowner) {
7909 status = true;
7910 break;
7911 }
7912 }
7913 spin_unlock(&flctx->flc_lock);
7914 }
7915 out:
7916 spin_unlock(&fp->fi_lock);
7917 return status;
7918 }
7919
7920 /**
7921 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
7922 * @rqstp: RPC transaction
7923 * @cstate: NFSv4 COMPOUND state
7924 * @u: RELEASE_LOCKOWNER arguments
7925 *
7926 * Check if there are any locks still held and, if not, free the lockowner
7927 * and any lock state that is owned.
7928 *
7929 * Return values:
7930 * %nfs_ok: lockowner released or not found
7931 * %nfserr_locks_held: lockowner still in use
7932 * %nfserr_stale_clientid: clientid no longer active
7933 * %nfserr_expired: clientid not recognized
7934 */
7935 __be32
7936 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7937 struct nfsd4_compound_state *cstate,
7938 union nfsd4_op_u *u)
7939 {
7940 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7941 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7942 clientid_t *clid = &rlockowner->rl_clientid;
7943 struct nfs4_ol_stateid *stp;
7944 struct nfs4_lockowner *lo;
7945 struct nfs4_client *clp;
7946 LIST_HEAD(reaplist);
7947 __be32 status;
7948
7949 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7950 clid->cl_boot, clid->cl_id);
7951
7952 status = set_client(clid, cstate, nn);
7953 if (status)
7954 return status;
7955 clp = cstate->clp;
7956
7957 spin_lock(&clp->cl_lock);
7958 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
7959 if (!lo) {
7960 spin_unlock(&clp->cl_lock);
7961 return nfs_ok;
7962 }
7963
7964 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
7965 if (check_for_locks(stp->st_stid.sc_file, lo)) {
7966 spin_unlock(&clp->cl_lock);
7967 nfs4_put_stateowner(&lo->lo_owner);
7968 return nfserr_locks_held;
7969 }
7970 }
7971 unhash_lockowner_locked(lo);
7972 while (!list_empty(&lo->lo_owner.so_stateids)) {
7973 stp = list_first_entry(&lo->lo_owner.so_stateids,
7974 struct nfs4_ol_stateid,
7975 st_perstateowner);
7976 WARN_ON(!unhash_lock_stateid(stp));
7977 put_ol_stateid_locked(stp, &reaplist);
7978 }
7979 spin_unlock(&clp->cl_lock);
7980
7981 free_ol_stateid_reaplist(&reaplist);
7982 remove_blocked_locks(lo);
7983 nfs4_put_stateowner(&lo->lo_owner);
7984 return nfs_ok;
7985 }
7986
7987 static inline struct nfs4_client_reclaim *
7988 alloc_reclaim(void)
7989 {
7990 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7991 }
7992
7993 bool
7994 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7995 {
7996 struct nfs4_client_reclaim *crp;
7997
7998 crp = nfsd4_find_reclaim_client(name, nn);
7999 return (crp && crp->cr_clp);
8000 }
8001
8002 /*
8003 * failure => all reset bets are off, nfserr_no_grace...
8004 *
8005 * The caller is responsible for freeing name.data if NULL is returned (it
8006 * will be freed in nfs4_remove_reclaim_record in the normal case).
8007 */
8008 struct nfs4_client_reclaim *
8009 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
8010 struct nfsd_net *nn)
8011 {
8012 unsigned int strhashval;
8013 struct nfs4_client_reclaim *crp;
8014
8015 crp = alloc_reclaim();
8016 if (crp) {
8017 strhashval = clientstr_hashval(name);
8018 INIT_LIST_HEAD(&crp->cr_strhash);
8019 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
8020 crp->cr_name.data = name.data;
8021 crp->cr_name.len = name.len;
8022 crp->cr_princhash.data = princhash.data;
8023 crp->cr_princhash.len = princhash.len;
8024 crp->cr_clp = NULL;
8025 nn->reclaim_str_hashtbl_size++;
8026 }
8027 return crp;
8028 }
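
/*
 * Hedged usage sketch of the ownership rule above: name.data becomes
 * owned by the reclaim record only on success, so a caller frees it
 * itself on failure:
 *
 *	crp = nfs4_client_to_reclaim(name, princhash, nn);
 *	if (!crp)
 *		kfree(name.data);
 */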
8029
8030 void
8031 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
8032 {
8033 list_del(&crp->cr_strhash);
8034 kfree(crp->cr_name.data);
8035 kfree(crp->cr_princhash.data);
8036 kfree(crp);
8037 nn->reclaim_str_hashtbl_size--;
8038 }
8039
8040 void
8041 nfs4_release_reclaim(struct nfsd_net *nn)
8042 {
8043 struct nfs4_client_reclaim *crp = NULL;
8044 int i;
8045
8046 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8047 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
8048 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
8049 struct nfs4_client_reclaim, cr_strhash);
8050 nfs4_remove_reclaim_record(crp, nn);
8051 }
8052 }
8053 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
8054 }
8055
8056 /*
8057 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
8058 struct nfs4_client_reclaim *
8059 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
8060 {
8061 unsigned int strhashval;
8062 struct nfs4_client_reclaim *crp = NULL;
8063
8064 strhashval = clientstr_hashval(name);
8065 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
8066 if (compare_blob(&crp->cr_name, &name) == 0) {
8067 return crp;
8068 }
8069 }
8070 return NULL;
8071 }
8072
8073 __be32
8074 nfs4_check_open_reclaim(struct nfs4_client *clp)
8075 {
8076 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8077 return nfserr_no_grace;
8078
8079 if (nfsd4_client_record_check(clp))
8080 return nfserr_reclaim_bad;
8081
8082 return nfs_ok;
8083 }
8084
8085 /*
8086 * Since the lifetime of a delegation isn't limited to that of an open, a
8087 * client may quite reasonably hang on to a delegation as long as it has
8088 * the inode cached. This becomes an obvious problem the first time a
8089 * client's inode cache approaches the size of the server's total memory.
8090 *
8091 * For now we avoid this problem by imposing a hard limit on the number
8092 * of delegations, which varies according to the server's memory size.
8093 */
8094 static void
8095 set_max_delegations(void)
8096 {
8097 /*
8098 * Allow at most 4 delegations per megabyte of RAM. Quick
8099 * estimates suggest that in the worst case (where every delegation
8100 * is for a different inode), a delegation could take about 1.5K,
8101 * giving a worst case usage of about 0.6% of memory.
8102 */
8103 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
8104 }
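
/*
 * Worked example of the shift (an illustration assuming 4K pages):
 * PAGE_SHIFT = 12 gives a shift of 20 - 2 - 12 = 6, i.e. one delegation
 * per 64 pages.  A megabyte holds 256 such pages, so 256 / 64 = 4
 * delegations per megabyte, matching the comment above.
 */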
8105
8106 static int nfs4_state_create_net(struct net *net)
8107 {
8108 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8109 int i;
8110
8111 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8112 sizeof(struct list_head),
8113 GFP_KERNEL);
8114 if (!nn->conf_id_hashtbl)
8115 goto err;
8116 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8117 sizeof(struct list_head),
8118 GFP_KERNEL);
8119 if (!nn->unconf_id_hashtbl)
8120 goto err_unconf_id;
8121 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
8122 sizeof(struct list_head),
8123 GFP_KERNEL);
8124 if (!nn->sessionid_hashtbl)
8125 goto err_sessionid;
8126
8127 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8128 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
8129 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
8130 }
8131 for (i = 0; i < SESSION_HASH_SIZE; i++)
8132 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
8133 nn->conf_name_tree = RB_ROOT;
8134 nn->unconf_name_tree = RB_ROOT;
8135 nn->boot_time = ktime_get_real_seconds();
8136 nn->grace_ended = false;
8137 nn->nfsd4_manager.block_opens = true;
8138 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
8139 INIT_LIST_HEAD(&nn->client_lru);
8140 INIT_LIST_HEAD(&nn->close_lru);
8141 INIT_LIST_HEAD(&nn->del_recall_lru);
8142 spin_lock_init(&nn->client_lock);
8143 spin_lock_init(&nn->s2s_cp_lock);
8144 idr_init(&nn->s2s_cp_stateids);
8145
8146 spin_lock_init(&nn->blocked_locks_lock);
8147 INIT_LIST_HEAD(&nn->blocked_locks_lru);
8148
8149 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
8150 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
8151 get_net(net);
8152
8153 nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
8154 nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
8155 nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
8156
8157 if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
8158 goto err_shrinker;
8159 return 0;
8160
8161 err_shrinker:
8162 put_net(net);
8163 kfree(nn->sessionid_hashtbl);
8164 err_sessionid:
8165 kfree(nn->unconf_id_hashtbl);
8166 err_unconf_id:
8167 kfree(nn->conf_id_hashtbl);
8168 err:
8169 return -ENOMEM;
8170 }
8171
8172 static void
8173 nfs4_state_destroy_net(struct net *net)
8174 {
8175 int i;
8176 struct nfs4_client *clp = NULL;
8177 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8178
8179 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8180 while (!list_empty(&nn->conf_id_hashtbl[i])) {
8181 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8182 destroy_client(clp);
8183 }
8184 }
8185
8186 WARN_ON(!list_empty(&nn->blocked_locks_lru));
8187
8188 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8189 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
8190 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8191 destroy_client(clp);
8192 }
8193 }
8194
8195 kfree(nn->sessionid_hashtbl);
8196 kfree(nn->unconf_id_hashtbl);
8197 kfree(nn->conf_id_hashtbl);
8198 put_net(net);
8199 }
8200
8201 int
8202 nfs4_state_start_net(struct net *net)
8203 {
8204 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8205 int ret;
8206
8207 ret = nfs4_state_create_net(net);
8208 if (ret)
8209 return ret;
8210 locks_start_grace(net, &nn->nfsd4_manager);
8211 nfsd4_client_tracking_init(net);
8212 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
8213 goto skip_grace;
8214 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
8215 nn->nfsd4_grace, net->ns.inum);
8216 trace_nfsd_grace_start(nn);
8217 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
8218 return 0;
8219
8220 skip_grace:
8221 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
8222 net->ns.inum);
8223 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
8224 nfsd4_end_grace(nn);
8225 return 0;
8226 }
8227
8228 /* initialization to perform when the nfsd service is started: */
8229
8230 int
8231 nfs4_state_start(void)
8232 {
8233 int ret;
8234
8235 ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
8236 if (ret)
8237 return ret;
8238
8239 ret = nfsd4_create_callback_queue();
8240 if (ret) {
8241 rhltable_destroy(&nfs4_file_rhltable);
8242 return ret;
8243 }
8244
8245 set_max_delegations();
8246 return 0;
8247 }
8248
8249 void
8250 nfs4_state_shutdown_net(struct net *net)
8251 {
8252 struct nfs4_delegation *dp = NULL;
8253 struct list_head *pos, *next, reaplist;
8254 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8255
8256 unregister_shrinker(&nn->nfsd_client_shrinker);
8257 cancel_work_sync(&nn->nfsd_shrinker_work);
8258 cancel_delayed_work_sync(&nn->laundromat_work);
8259 locks_end_grace(&nn->nfsd4_manager);
8260
8261 INIT_LIST_HEAD(&reaplist);
8262 spin_lock(&state_lock);
8263 list_for_each_safe(pos, next, &nn->del_recall_lru) {
8264 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8265 WARN_ON(!unhash_delegation_locked(dp));
8266 list_add(&dp->dl_recall_lru, &reaplist);
8267 }
8268 spin_unlock(&state_lock);
8269 list_for_each_safe(pos, next, &reaplist) {
8270 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8271 list_del_init(&dp->dl_recall_lru);
8272 destroy_unhashed_deleg(dp);
8273 }
8274
8275 nfsd4_client_tracking_exit(net);
8276 nfs4_state_destroy_net(net);
8277 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
8278 nfsd4_ssc_shutdown_umount(nn);
8279 #endif
8280 }
8281
8282 void
8283 nfs4_state_shutdown(void)
8284 {
8285 nfsd4_destroy_callback_queue();
8286 rhltable_destroy(&nfs4_file_rhltable);
8287 }
8288
8289 static void
8290 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8291 {
8292 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
8293 CURRENT_STATEID(stateid))
8294 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8295 }
8296
8297 static void
8298 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8299 {
8300 if (cstate->minorversion) {
8301 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
8302 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8303 }
8304 }
8305
8306 void
8307 clear_current_stateid(struct nfsd4_compound_state *cstate)
8308 {
8309 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8310 }
8311
8312 /*
8313 * functions to set current state id
8314 */
8315 void
8316 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
8317 union nfsd4_op_u *u)
8318 {
8319 put_stateid(cstate, &u->open_downgrade.od_stateid);
8320 }
8321
8322 void
8323 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
8324 union nfsd4_op_u *u)
8325 {
8326 put_stateid(cstate, &u->open.op_stateid);
8327 }
8328
8329 void
8330 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
8331 union nfsd4_op_u *u)
8332 {
8333 put_stateid(cstate, &u->close.cl_stateid);
8334 }
8335
8336 void
8337 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8338 union nfsd4_op_u *u)
8339 {
8340 put_stateid(cstate, &u->lock.lk_resp_stateid);
8341 }
8342
8343 /*
8344 * functions to consume current state id
8345 */
8346
8347 void
8348 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8349 union nfsd4_op_u *u)
8350 {
8351 get_stateid(cstate, &u->open_downgrade.od_stateid);
8352 }
8353
8354 void
8355 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8356 union nfsd4_op_u *u)
8357 {
8358 get_stateid(cstate, &u->delegreturn.dr_stateid);
8359 }
8360
8361 void
8362 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8363 union nfsd4_op_u *u)
8364 {
8365 get_stateid(cstate, &u->free_stateid.fr_stateid);
8366 }
8367
8368 void
8369 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8370 union nfsd4_op_u *u)
8371 {
8372 get_stateid(cstate, &u->setattr.sa_stateid);
8373 }
8374
8375 void
8376 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8377 union nfsd4_op_u *u)
8378 {
8379 get_stateid(cstate, &u->close.cl_stateid);
8380 }
8381
8382 void
8383 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8384 union nfsd4_op_u *u)
8385 {
8386 get_stateid(cstate, &u->locku.lu_stateid);
8387 }
8388
8389 void
8390 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8391 union nfsd4_op_u *u)
8392 {
8393 get_stateid(cstate, &u->read.rd_stateid);
8394 }
8395
8396 void
8397 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8398 union nfsd4_op_u *u)
8399 {
8400 get_stateid(cstate, &u->write.wr_stateid);
8401 }
8402
8403 /**
8404 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
8405 * @rqstp: RPC transaction context
8406 * @inode: file to be checked for a conflict
8407 *
8408 * This function is called when there is a conflict between a write
8409 * delegation and a change/size GETATTR from another client. The server
8410 * must either use the CB_GETATTR to get the current values of the
8411 * attributes from the client that holds the delegation or recall the
8412 * delegation before replying to the GETATTR. See RFC 8881 section
8413 * 18.7.4.
8414 *
8415 * The current implementation does not support CB_GETATTR yet. However,
8416 * support for using CB_GETATTR to avoid recalling the delegation could
8417 * be added in follow-up work.
8418 *
8419 * Returns 0 if there is no conflict; otherwise an nfs_stat
8420 * code is returned.
8421 */
8422 __be32
8423 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
8424 {
8425 __be32 status;
8426 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8427 struct file_lock_context *ctx;
8428 struct file_lock *fl;
8429 struct nfs4_delegation *dp;
8430
8431 ctx = locks_inode_context(inode);
8432 if (!ctx)
8433 return 0;
8434 spin_lock(&ctx->flc_lock);
8435 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
8436 if (fl->fl_flags == FL_LAYOUT)
8437 continue;
8438 if (fl->fl_lmops != &nfsd_lease_mng_ops) {
8439 /*
8440 * non-nfs lease, if it's a lease with F_RDLCK then
8441 * we are done; there isn't any write delegation
8442 * on this inode
8443 */
8444 if (fl->fl_type == F_RDLCK)
8445 break;
8446 goto break_lease;
8447 }
8448 if (fl->fl_type == F_WRLCK) {
8449 dp = fl->fl_owner;
8450 if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
8451 spin_unlock(&ctx->flc_lock);
8452 return 0;
8453 }
8454 break_lease:
8455 spin_unlock(&ctx->flc_lock);
8456 nfsd_stats_wdeleg_getattr_inc(nn);
8457 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8458 if (status != nfserr_jukebox ||
8459 !nfsd_wait_for_delegreturn(rqstp, inode))
8460 return status;
8461 return 0;
8462 }
8463 break;
8464 }
8465 spin_unlock(&ctx->flc_lock);
8466 return 0;
8467 }
8468