/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/nfs_ssc.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

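/*
 * Illustrative sketch (added; not part of the original file): the put
 * helpers above use the common atomic_dec_and_lock() idiom, where only
 * the caller that drops the final reference takes the spinlock.  A
 * minimal analogue, using a hypothetical object type, looks like:
 */
#if 0	/* example only, never compiled */
static void put_object(struct object *obj)
{
	/* Fast path: count stays above zero, no lock taken. */
	if (!atomic_dec_and_lock(&obj->ref, &obj->lock))
		return;
	/* Slow path: lock is held and the last reference is gone. */
	unhash_object(obj);
	spin_unlock(&obj->lock);
	free_object(obj);
}
#endif
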
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used. This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

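/*
 * Worked example (added for illustration): with the on-the-wire values
 * NFS4_SHARE_ACCESS_READ == 1, NFS4_SHARE_ACCESS_WRITE == 2 and
 * NFS4_SHARE_ACCESS_BOTH == 3, an OPEN for READ followed by an OPEN
 * for BOTH sets bits 1 and 3, so the bitmap is 0b1010.
 * bmap_to_share_mode(0b1010) ORs the set bit indices together:
 * 1 | 3 == 3 == NFS4_SHARE_ACCESS_BOTH, the union of all access modes
 * ever granted on the stateid.
 */
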
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

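/*
 * Note (added): opaque_hashval() is a plain x = x * 37 + byte
 * polynomial hash over an opaque blob; ownerstr_hashval() below masks
 * the result with OWNER_HASH_MASK to index the per-client owner table.
 * For example, hashing the two bytes { 'a', 'b' } gives
 * ('a' * 37) + 'b' == 97 * 37 + 98 == 3687, and 3687 & 0xff == 103.
 */
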
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
	struct nfsd_file *ret = NULL;

	spin_lock(&f->fi_lock);
	if (f->fi_deleg_file)
		ret = nfsd_file_get(f->fi_deleg_file);
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct svc_fh *fh)
{
	struct inode *inode = d_inode(fh->fh_dentry);

	/* XXX: why not (here & in file cache) use inode? */
	return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

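/*
 * Sketch (added for illustration): idr_alloc_cyclic() implements the
 * "sctp trick" mentioned in nfs4_alloc_stid() -- IDs are handed out in
 * increasing order and wrap only at the end of the range, so an so_id
 * is not reused until on the order of INT_MAX allocations later:
 *
 *	1st allocation -> 1
 *	2nd allocation -> 2
 *	...
 *	once the range is exhausted, the search wraps back to the
 *	floor, skipping any IDs still in use.
 */
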
struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use
 * the low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_raw, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

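/*
 * Illustrative sketch (added; not compiled): membership in either bloom
 * filter is three test_bit() probes per filter, all derived from one
 * 32-bit jhash of the filehandle:
 */
#if 0	/* example only */
static bool bloom_contains(const unsigned long *set, u32 hash)
{
	return test_bit(hash & 255, set) &&
	       test_bit((hash >> 8) & 255, set) &&
	       test_bit((hash >> 16) & 255, set);
}
#endif
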
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqids are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */

static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

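/*
 * Note (added): the cl_minorversion check above is why revoked state
 * lingers for NFSv4.1+ clients -- the extra reference parks the
 * delegation on cl_revoked so the client can discover the revocation
 * via TEST_STATEID and release it with FREE_STATEID.  NFSv4.0 has no
 * such operations, so the state is simply torn down.
 */
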
1298 */ 1299 static void 1300 recalculate_deny_mode(struct nfs4_file *fp) 1301 { 1302 struct nfs4_ol_stateid *stp; 1303 1304 spin_lock(&fp->fi_lock); 1305 fp->fi_share_deny = 0; 1306 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) 1307 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); 1308 spin_unlock(&fp->fi_lock); 1309 } 1310 1311 static void 1312 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) 1313 { 1314 int i; 1315 bool change = false; 1316 1317 for (i = 1; i < 4; i++) { 1318 if ((i & deny) != i) { 1319 change = true; 1320 clear_deny(i, stp); 1321 } 1322 } 1323 1324 /* Recalculate per-file deny mode if there was a change */ 1325 if (change) 1326 recalculate_deny_mode(stp->st_stid.sc_file); 1327 } 1328 1329 /* release all access and file references for a given stateid */ 1330 static void 1331 release_all_access(struct nfs4_ol_stateid *stp) 1332 { 1333 int i; 1334 struct nfs4_file *fp = stp->st_stid.sc_file; 1335 1336 if (fp && stp->st_deny_bmap != 0) 1337 recalculate_deny_mode(fp); 1338 1339 for (i = 1; i < 4; i++) { 1340 if (test_access(i, stp)) 1341 nfs4_file_put_access(stp->st_stid.sc_file, i); 1342 clear_access(i, stp); 1343 } 1344 } 1345 1346 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop) 1347 { 1348 kfree(sop->so_owner.data); 1349 sop->so_ops->so_free(sop); 1350 } 1351 1352 static void nfs4_put_stateowner(struct nfs4_stateowner *sop) 1353 { 1354 struct nfs4_client *clp = sop->so_client; 1355 1356 might_lock(&clp->cl_lock); 1357 1358 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) 1359 return; 1360 sop->so_ops->so_unhash(sop); 1361 spin_unlock(&clp->cl_lock); 1362 nfs4_free_stateowner(sop); 1363 } 1364 1365 static bool 1366 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) 1367 { 1368 return list_empty(&stp->st_perfile); 1369 } 1370 1371 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) 1372 { 1373 struct nfs4_file *fp = stp->st_stid.sc_file; 1374 1375 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); 1376 1377 if (list_empty(&stp->st_perfile)) 1378 return false; 1379 1380 spin_lock(&fp->fi_lock); 1381 list_del_init(&stp->st_perfile); 1382 spin_unlock(&fp->fi_lock); 1383 list_del(&stp->st_perstateowner); 1384 return true; 1385 } 1386 1387 static void nfs4_free_ol_stateid(struct nfs4_stid *stid) 1388 { 1389 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1390 1391 put_clnt_odstate(stp->st_clnt_odstate); 1392 release_all_access(stp); 1393 if (stp->st_stateowner) 1394 nfs4_put_stateowner(stp->st_stateowner); 1395 kmem_cache_free(stateid_slab, stid); 1396 } 1397 1398 static void nfs4_free_lock_stateid(struct nfs4_stid *stid) 1399 { 1400 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1401 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); 1402 struct nfsd_file *nf; 1403 1404 nf = find_any_file(stp->st_stid.sc_file); 1405 if (nf) { 1406 get_file(nf->nf_file); 1407 filp_close(nf->nf_file, (fl_owner_t)lo); 1408 nfsd_file_put(nf); 1409 } 1410 nfs4_free_ol_stateid(stid); 1411 } 1412 1413 /* 1414 * Put the persistent reference to an already unhashed generic stateid, while 1415 * holding the cl_lock. If it's the last reference, then put it onto the 1416 * reaplist for later destruction. 
1417 */ 1418 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, 1419 struct list_head *reaplist) 1420 { 1421 struct nfs4_stid *s = &stp->st_stid; 1422 struct nfs4_client *clp = s->sc_client; 1423 1424 lockdep_assert_held(&clp->cl_lock); 1425 1426 WARN_ON_ONCE(!list_empty(&stp->st_locks)); 1427 1428 if (!refcount_dec_and_test(&s->sc_count)) { 1429 wake_up_all(&close_wq); 1430 return; 1431 } 1432 1433 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1434 list_add(&stp->st_locks, reaplist); 1435 } 1436 1437 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1438 { 1439 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1440 1441 if (!unhash_ol_stateid(stp)) 1442 return false; 1443 list_del_init(&stp->st_locks); 1444 nfs4_unhash_stid(&stp->st_stid); 1445 return true; 1446 } 1447 1448 static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1449 { 1450 struct nfs4_client *clp = stp->st_stid.sc_client; 1451 bool unhashed; 1452 1453 spin_lock(&clp->cl_lock); 1454 unhashed = unhash_lock_stateid(stp); 1455 spin_unlock(&clp->cl_lock); 1456 if (unhashed) 1457 nfs4_put_stid(&stp->st_stid); 1458 } 1459 1460 static void unhash_lockowner_locked(struct nfs4_lockowner *lo) 1461 { 1462 struct nfs4_client *clp = lo->lo_owner.so_client; 1463 1464 lockdep_assert_held(&clp->cl_lock); 1465 1466 list_del_init(&lo->lo_owner.so_strhash); 1467 } 1468 1469 /* 1470 * Free a list of generic stateids that were collected earlier after being 1471 * fully unhashed. 1472 */ 1473 static void 1474 free_ol_stateid_reaplist(struct list_head *reaplist) 1475 { 1476 struct nfs4_ol_stateid *stp; 1477 struct nfs4_file *fp; 1478 1479 might_sleep(); 1480 1481 while (!list_empty(reaplist)) { 1482 stp = list_first_entry(reaplist, struct nfs4_ol_stateid, 1483 st_locks); 1484 list_del(&stp->st_locks); 1485 fp = stp->st_stid.sc_file; 1486 stp->st_stid.sc_free(&stp->st_stid); 1487 if (fp) 1488 put_nfs4_file(fp); 1489 } 1490 } 1491 1492 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, 1493 struct list_head *reaplist) 1494 { 1495 struct nfs4_ol_stateid *stp; 1496 1497 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); 1498 1499 while (!list_empty(&open_stp->st_locks)) { 1500 stp = list_entry(open_stp->st_locks.next, 1501 struct nfs4_ol_stateid, st_locks); 1502 WARN_ON(!unhash_lock_stateid(stp)); 1503 put_ol_stateid_locked(stp, reaplist); 1504 } 1505 } 1506 1507 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, 1508 struct list_head *reaplist) 1509 { 1510 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1511 1512 if (!unhash_ol_stateid(stp)) 1513 return false; 1514 release_open_stateid_locks(stp, reaplist); 1515 return true; 1516 } 1517 1518 static void release_open_stateid(struct nfs4_ol_stateid *stp) 1519 { 1520 LIST_HEAD(reaplist); 1521 1522 spin_lock(&stp->st_stid.sc_client->cl_lock); 1523 if (unhash_open_stateid(stp, &reaplist)) 1524 put_ol_stateid_locked(stp, &reaplist); 1525 spin_unlock(&stp->st_stid.sc_client->cl_lock); 1526 free_ol_stateid_reaplist(&reaplist); 1527 } 1528 1529 static void unhash_openowner_locked(struct nfs4_openowner *oo) 1530 { 1531 struct nfs4_client *clp = oo->oo_owner.so_client; 1532 1533 lockdep_assert_held(&clp->cl_lock); 1534 1535 list_del_init(&oo->oo_owner.so_strhash); 1536 list_del_init(&oo->oo_perclient); 1537 } 1538 1539 static void release_last_closed_stateid(struct nfs4_openowner *oo) 1540 { 1541 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, 1542 nfsd_net_id); 1543 struct nfs4_ol_stateid *s; 
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

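/*
 * Worked example (added): NFSD_MIN_HDR_SEQ_SZ is 24 + 12 + 44 == 80
 * bytes.  A client advertising a ca_maxresponsesize_cached of 2128
 * bytes therefore gets 2128 - 80 == 2048 bytes of cached reply data
 * per slot, plus sizeof(struct nfsd4_slot) for the bookkeeping.
 */
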
1706 */ 1707 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads); 1708 1709 avail = clamp_t(unsigned long, avail, slotsize, 1710 total_avail/scale_factor); 1711 num = min_t(int, num, avail / slotsize); 1712 num = max_t(int, num, 1); 1713 nfsd_drc_mem_used += num * slotsize; 1714 spin_unlock(&nfsd_drc_lock); 1715 1716 return num; 1717 } 1718 1719 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) 1720 { 1721 int slotsize = slot_bytes(ca); 1722 1723 spin_lock(&nfsd_drc_lock); 1724 nfsd_drc_mem_used -= slotsize * ca->maxreqs; 1725 spin_unlock(&nfsd_drc_lock); 1726 } 1727 1728 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, 1729 struct nfsd4_channel_attrs *battrs) 1730 { 1731 int numslots = fattrs->maxreqs; 1732 int slotsize = slot_bytes(fattrs); 1733 struct nfsd4_session *new; 1734 int mem, i; 1735 1736 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) 1737 + sizeof(struct nfsd4_session) > PAGE_SIZE); 1738 mem = numslots * sizeof(struct nfsd4_slot *); 1739 1740 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); 1741 if (!new) 1742 return NULL; 1743 /* allocate each struct nfsd4_slot and data cache in one piece */ 1744 for (i = 0; i < numslots; i++) { 1745 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); 1746 if (!new->se_slots[i]) 1747 goto out_free; 1748 } 1749 1750 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); 1751 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); 1752 1753 return new; 1754 out_free: 1755 while (i--) 1756 kfree(new->se_slots[i]); 1757 kfree(new); 1758 return NULL; 1759 } 1760 1761 static void free_conn(struct nfsd4_conn *c) 1762 { 1763 svc_xprt_put(c->cn_xprt); 1764 kfree(c); 1765 } 1766 1767 static void nfsd4_conn_lost(struct svc_xpt_user *u) 1768 { 1769 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); 1770 struct nfs4_client *clp = c->cn_session->se_client; 1771 1772 trace_nfsd_cb_lost(clp); 1773 1774 spin_lock(&clp->cl_lock); 1775 if (!list_empty(&c->cn_persession)) { 1776 list_del(&c->cn_persession); 1777 free_conn(c); 1778 } 1779 nfsd4_probe_callback(clp); 1780 spin_unlock(&clp->cl_lock); 1781 } 1782 1783 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 1784 { 1785 struct nfsd4_conn *conn; 1786 1787 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 1788 if (!conn) 1789 return NULL; 1790 svc_xprt_get(rqstp->rq_xprt); 1791 conn->cn_xprt = rqstp->rq_xprt; 1792 conn->cn_flags = flags; 1793 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 1794 return conn; 1795 } 1796 1797 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 1798 { 1799 conn->cn_session = ses; 1800 list_add(&conn->cn_persession, &ses->se_conns); 1801 } 1802 1803 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 1804 { 1805 struct nfs4_client *clp = ses->se_client; 1806 1807 spin_lock(&clp->cl_lock); 1808 __nfsd4_hash_conn(conn, ses); 1809 spin_unlock(&clp->cl_lock); 1810 } 1811 1812 static int nfsd4_register_conn(struct nfsd4_conn *conn) 1813 { 1814 conn->cn_xpt_user.callback = nfsd4_conn_lost; 1815 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 1816 } 1817 1818 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) 1819 { 1820 int ret; 1821 1822 nfsd4_hash_conn(conn, ses); 1823 ret = nfsd4_register_conn(conn); 1824 if (ret) 1825 /* oops; xprt is already down: */ 1826 nfsd4_conn_lost(&conn->cn_xpt_user); 1827 /* We may have 
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	trace_nfsd_cb_lost(clp);

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

Use the peer address 1899 * as a reasonable default for now, but consider fixing 1900 * the rpc client not to require an address in the 1901 * future: 1902 */ 1903 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 1904 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 1905 } 1906 } 1907 1908 /* caller must hold client_lock */ 1909 static struct nfsd4_session * 1910 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 1911 { 1912 struct nfsd4_session *elem; 1913 int idx; 1914 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1915 1916 lockdep_assert_held(&nn->client_lock); 1917 1918 dump_sessionid(__func__, sessionid); 1919 idx = hash_sessionid(sessionid); 1920 /* Search in the appropriate list */ 1921 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 1922 if (!memcmp(elem->se_sessionid.data, sessionid->data, 1923 NFS4_MAX_SESSIONID_LEN)) { 1924 return elem; 1925 } 1926 } 1927 1928 dprintk("%s: session not found\n", __func__); 1929 return NULL; 1930 } 1931 1932 static struct nfsd4_session * 1933 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 1934 __be32 *ret) 1935 { 1936 struct nfsd4_session *session; 1937 __be32 status = nfserr_badsession; 1938 1939 session = __find_in_sessionid_hashtbl(sessionid, net); 1940 if (!session) 1941 goto out; 1942 status = nfsd4_get_session_locked(session); 1943 if (status) 1944 session = NULL; 1945 out: 1946 *ret = status; 1947 return session; 1948 } 1949 1950 /* caller must hold client_lock */ 1951 static void 1952 unhash_session(struct nfsd4_session *ses) 1953 { 1954 struct nfs4_client *clp = ses->se_client; 1955 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1956 1957 lockdep_assert_held(&nn->client_lock); 1958 1959 list_del(&ses->se_hash); 1960 spin_lock(&ses->se_client->cl_lock); 1961 list_del(&ses->se_perclnt); 1962 spin_unlock(&ses->se_client->cl_lock); 1963 } 1964 1965 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1966 static int 1967 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 1968 { 1969 /* 1970 * We're assuming the clid was not given out from a boot 1971 * precisely 2^32 (about 136 years) before this one. That seems 1972 * a safe assumption: 1973 */ 1974 if (clid->cl_boot == (u32)nn->boot_time) 1975 return 0; 1976 trace_nfsd_clid_stale(clid); 1977 return 1; 1978 } 1979 1980 /* 1981 * XXX Should we use a slab cache ? 1982 * This type of memory management is somewhat inefficient, but we use it 1983 * anyway since SETCLIENTID is not a common operation. 
1984 */ 1985 static struct nfs4_client *alloc_client(struct xdr_netobj name) 1986 { 1987 struct nfs4_client *clp; 1988 int i; 1989 1990 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); 1991 if (clp == NULL) 1992 return NULL; 1993 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); 1994 if (clp->cl_name.data == NULL) 1995 goto err_no_name; 1996 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, 1997 sizeof(struct list_head), 1998 GFP_KERNEL); 1999 if (!clp->cl_ownerstr_hashtbl) 2000 goto err_no_hashtbl; 2001 for (i = 0; i < OWNER_HASH_SIZE; i++) 2002 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 2003 INIT_LIST_HEAD(&clp->cl_sessions); 2004 idr_init(&clp->cl_stateids); 2005 atomic_set(&clp->cl_rpc_users, 0); 2006 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 2007 INIT_LIST_HEAD(&clp->cl_idhash); 2008 INIT_LIST_HEAD(&clp->cl_openowners); 2009 INIT_LIST_HEAD(&clp->cl_delegations); 2010 INIT_LIST_HEAD(&clp->cl_lru); 2011 INIT_LIST_HEAD(&clp->cl_revoked); 2012 #ifdef CONFIG_NFSD_PNFS 2013 INIT_LIST_HEAD(&clp->cl_lo_states); 2014 #endif 2015 INIT_LIST_HEAD(&clp->async_copies); 2016 spin_lock_init(&clp->async_lock); 2017 spin_lock_init(&clp->cl_lock); 2018 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 2019 return clp; 2020 err_no_hashtbl: 2021 kfree(clp->cl_name.data); 2022 err_no_name: 2023 kmem_cache_free(client_slab, clp); 2024 return NULL; 2025 } 2026 2027 static void __free_client(struct kref *k) 2028 { 2029 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); 2030 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); 2031 2032 free_svc_cred(&clp->cl_cred); 2033 kfree(clp->cl_ownerstr_hashtbl); 2034 kfree(clp->cl_name.data); 2035 kfree(clp->cl_nii_domain.data); 2036 kfree(clp->cl_nii_name.data); 2037 idr_destroy(&clp->cl_stateids); 2038 kmem_cache_free(client_slab, clp); 2039 } 2040 2041 static void drop_client(struct nfs4_client *clp) 2042 { 2043 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); 2044 } 2045 2046 static void 2047 free_client(struct nfs4_client *clp) 2048 { 2049 while (!list_empty(&clp->cl_sessions)) { 2050 struct nfsd4_session *ses; 2051 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 2052 se_perclnt); 2053 list_del(&ses->se_perclnt); 2054 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 2055 free_session(ses); 2056 } 2057 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 2058 if (clp->cl_nfsd_dentry) { 2059 nfsd_client_rmdir(clp->cl_nfsd_dentry); 2060 clp->cl_nfsd_dentry = NULL; 2061 wake_up_all(&expiry_wq); 2062 } 2063 drop_client(clp); 2064 } 2065 2066 /* must be called under the client_lock */ 2067 static void 2068 unhash_client_locked(struct nfs4_client *clp) 2069 { 2070 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2071 struct nfsd4_session *ses; 2072 2073 lockdep_assert_held(&nn->client_lock); 2074 2075 /* Mark the client as expired! 
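* Setting cl_time to 0 is the marker that is_client_expired() tests; once this store is visible under client_lock, the client can no longer be renewed.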
*/ 2076 clp->cl_time = 0; 2077 /* Make it invisible */ 2078 if (!list_empty(&clp->cl_idhash)) { 2079 list_del_init(&clp->cl_idhash); 2080 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2081 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 2082 else 2083 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2084 } 2085 list_del_init(&clp->cl_lru); 2086 spin_lock(&clp->cl_lock); 2087 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 2088 list_del_init(&ses->se_hash); 2089 spin_unlock(&clp->cl_lock); 2090 } 2091 2092 static void 2093 unhash_client(struct nfs4_client *clp) 2094 { 2095 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2096 2097 spin_lock(&nn->client_lock); 2098 unhash_client_locked(clp); 2099 spin_unlock(&nn->client_lock); 2100 } 2101 2102 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 2103 { 2104 if (atomic_read(&clp->cl_rpc_users)) 2105 return nfserr_jukebox; 2106 unhash_client_locked(clp); 2107 return nfs_ok; 2108 } 2109 2110 static void 2111 __destroy_client(struct nfs4_client *clp) 2112 { 2113 int i; 2114 struct nfs4_openowner *oo; 2115 struct nfs4_delegation *dp; 2116 struct list_head reaplist; 2117 2118 INIT_LIST_HEAD(&reaplist); 2119 spin_lock(&state_lock); 2120 while (!list_empty(&clp->cl_delegations)) { 2121 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 2122 WARN_ON(!unhash_delegation_locked(dp)); 2123 list_add(&dp->dl_recall_lru, &reaplist); 2124 } 2125 spin_unlock(&state_lock); 2126 while (!list_empty(&reaplist)) { 2127 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 2128 list_del_init(&dp->dl_recall_lru); 2129 destroy_unhashed_deleg(dp); 2130 } 2131 while (!list_empty(&clp->cl_revoked)) { 2132 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 2133 list_del_init(&dp->dl_recall_lru); 2134 nfs4_put_stid(&dp->dl_stid); 2135 } 2136 while (!list_empty(&clp->cl_openowners)) { 2137 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 2138 nfs4_get_stateowner(&oo->oo_owner); 2139 release_openowner(oo); 2140 } 2141 for (i = 0; i < OWNER_HASH_SIZE; i++) { 2142 struct nfs4_stateowner *so, *tmp; 2143 2144 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], 2145 so_strhash) { 2146 /* Should be no openowners at this point */ 2147 WARN_ON_ONCE(so->so_is_open_owner); 2148 remove_blocked_locks(lockowner(so)); 2149 } 2150 } 2151 nfsd4_return_all_client_layouts(clp); 2152 nfsd4_shutdown_copy(clp); 2153 nfsd4_shutdown_callback(clp); 2154 if (clp->cl_cb_conn.cb_xprt) 2155 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 2156 free_client(clp); 2157 wake_up_all(&expiry_wq); 2158 } 2159 2160 static void 2161 destroy_client(struct nfs4_client *clp) 2162 { 2163 unhash_client(clp); 2164 __destroy_client(clp); 2165 } 2166 2167 static void inc_reclaim_complete(struct nfs4_client *clp) 2168 { 2169 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2170 2171 if (!nn->track_reclaim_completes) 2172 return; 2173 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) 2174 return; 2175 if (atomic_inc_return(&nn->nr_reclaim_complete) == 2176 nn->reclaim_str_hashtbl_size) { 2177 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", 2178 clp->net->ns.inum); 2179 nfsd4_end_grace(nn); 2180 } 2181 } 2182 2183 static void expire_client(struct nfs4_client *clp) 2184 { 2185 unhash_client(clp); 2186 nfsd4_client_record_remove(clp); 2187 __destroy_client(clp); 2188 } 2189 2190 static void copy_verf(struct nfs4_client 
*target, nfs4_verifier *source) 2191 { 2192 memcpy(target->cl_verifier.data, source->data, 2193 sizeof(target->cl_verifier.data)); 2194 } 2195 2196 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 2197 { 2198 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 2199 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 2200 } 2201 2202 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 2203 { 2204 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); 2205 target->cr_raw_principal = kstrdup(source->cr_raw_principal, 2206 GFP_KERNEL); 2207 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); 2208 if ((source->cr_principal && !target->cr_principal) || 2209 (source->cr_raw_principal && !target->cr_raw_principal) || 2210 (source->cr_targ_princ && !target->cr_targ_princ)) 2211 return -ENOMEM; 2212 2213 target->cr_flavor = source->cr_flavor; 2214 target->cr_uid = source->cr_uid; 2215 target->cr_gid = source->cr_gid; 2216 target->cr_group_info = source->cr_group_info; 2217 get_group_info(target->cr_group_info); 2218 target->cr_gss_mech = source->cr_gss_mech; 2219 if (source->cr_gss_mech) 2220 gss_mech_get(source->cr_gss_mech); 2221 return 0; 2222 } 2223 2224 static int 2225 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 2226 { 2227 if (o1->len < o2->len) 2228 return -1; 2229 if (o1->len > o2->len) 2230 return 1; 2231 return memcmp(o1->data, o2->data, o1->len); 2232 } 2233 2234 static int 2235 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 2236 { 2237 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 2238 } 2239 2240 static int 2241 same_clid(clientid_t *cl1, clientid_t *cl2) 2242 { 2243 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 2244 } 2245 2246 static bool groups_equal(struct group_info *g1, struct group_info *g2) 2247 { 2248 int i; 2249 2250 if (g1->ngroups != g2->ngroups) 2251 return false; 2252 for (i=0; i<g1->ngroups; i++) 2253 if (!gid_eq(g1->gid[i], g2->gid[i])) 2254 return false; 2255 return true; 2256 } 2257 2258 /* 2259 * RFC 3530 language requires clid_inuse be returned when the 2260 * "principal" associated with a request differs from that previously 2261 * used. We use the uid, gids, and gss principal string as our best 2262 * approximation. We also don't want to allow non-gss use of a client 2263 * established using gss: in theory cr_principal should catch that 2264 * change, but in practice cr_principal can be null even in the gss case 2265 * since gssd doesn't always pass down a principal string. 2266 */ 2267 static bool is_gss_cred(struct svc_cred *cr) 2268 { 2269 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 2270 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 2271 } 2272 2273 2274 static bool 2275 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 2276 { 2277 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 2278 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) 2279 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) 2280 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 2281 return false; 2282 /* XXX: check that cr_targ_princ fields match ? 
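* (Only cr_principal is compared below; cr_targ_princ is copied by copy_cred() but never checked here.)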
*/ 2283 if (cr1->cr_principal == cr2->cr_principal) 2284 return true; 2285 if (!cr1->cr_principal || !cr2->cr_principal) 2286 return false; 2287 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 2288 } 2289 2290 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 2291 { 2292 struct svc_cred *cr = &rqstp->rq_cred; 2293 u32 service; 2294 2295 if (!cr->cr_gss_mech) 2296 return false; 2297 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 2298 return service == RPC_GSS_SVC_INTEGRITY || 2299 service == RPC_GSS_SVC_PRIVACY; 2300 } 2301 2302 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 2303 { 2304 struct svc_cred *cr = &rqstp->rq_cred; 2305 2306 if (!cl->cl_mach_cred) 2307 return true; 2308 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 2309 return false; 2310 if (!svc_rqst_integrity_protected(rqstp)) 2311 return false; 2312 if (cl->cl_cred.cr_raw_principal) 2313 return 0 == strcmp(cl->cl_cred.cr_raw_principal, 2314 cr->cr_raw_principal); 2315 if (!cr->cr_principal) 2316 return false; 2317 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 2318 } 2319 2320 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 2321 { 2322 __be32 verf[2]; 2323 2324 /* 2325 * This is opaque to client, so no need to byte-swap. Use 2326 * __force to keep sparse happy 2327 */ 2328 verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); 2329 verf[1] = (__force __be32)nn->clverifier_counter++; 2330 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2331 } 2332 2333 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2334 { 2335 clp->cl_clientid.cl_boot = (u32)nn->boot_time; 2336 clp->cl_clientid.cl_id = nn->clientid_counter++; 2337 gen_confirm(clp, nn); 2338 } 2339 2340 static struct nfs4_stid * 2341 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 2342 { 2343 struct nfs4_stid *ret; 2344 2345 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 2346 if (!ret || !ret->sc_type) 2347 return NULL; 2348 return ret; 2349 } 2350 2351 static struct nfs4_stid * 2352 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 2353 { 2354 struct nfs4_stid *s; 2355 2356 spin_lock(&cl->cl_lock); 2357 s = find_stateid_locked(cl, t); 2358 if (s != NULL) { 2359 if (typemask & s->sc_type) 2360 refcount_inc(&s->sc_count); 2361 else 2362 s = NULL; 2363 } 2364 spin_unlock(&cl->cl_lock); 2365 return s; 2366 } 2367 2368 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) 2369 { 2370 struct nfsdfs_client *nc; 2371 nc = get_nfsdfs_client(inode); 2372 if (!nc) 2373 return NULL; 2374 return container_of(nc, struct nfs4_client, cl_nfsdfs); 2375 } 2376 2377 static void seq_quote_mem(struct seq_file *m, char *data, int len) 2378 { 2379 seq_printf(m, "\""); 2380 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\"); 2381 seq_printf(m, "\""); 2382 } 2383 2384 static const char *cb_state2str(int state) 2385 { 2386 switch (state) { 2387 case NFSD4_CB_UP: 2388 return "UP"; 2389 case NFSD4_CB_UNKNOWN: 2390 return "UNKNOWN"; 2391 case NFSD4_CB_DOWN: 2392 return "DOWN"; 2393 case NFSD4_CB_FAULT: 2394 return "FAULT"; 2395 } 2396 return "UNDEFINED"; 2397 } 2398 2399 static int client_info_show(struct seq_file *m, void *v) 2400 { 2401 struct inode *inode = m->private; 2402 struct nfs4_client *clp; 2403 u64 clid; 2404 2405 clp = get_nfsdfs_clp(inode); 2406 if (!clp) 2407 return -ENXIO; 2408 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); 2409 seq_printf(m, "clientid: 
0x%llx\n", clid); 2410 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); 2411 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2412 seq_puts(m, "status: confirmed\n"); 2413 else 2414 seq_puts(m, "status: unconfirmed\n"); 2415 seq_printf(m, "name: "); 2416 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); 2417 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); 2418 if (clp->cl_nii_domain.data) { 2419 seq_printf(m, "Implementation domain: "); 2420 seq_quote_mem(m, clp->cl_nii_domain.data, 2421 clp->cl_nii_domain.len); 2422 seq_printf(m, "\nImplementation name: "); 2423 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); 2424 seq_printf(m, "\nImplementation time: [%lld, %ld]\n", 2425 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); 2426 } 2427 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); 2428 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr); 2429 drop_client(clp); 2430 2431 return 0; 2432 } 2433 2434 static int client_info_open(struct inode *inode, struct file *file) 2435 { 2436 return single_open(file, client_info_show, inode); 2437 } 2438 2439 static const struct file_operations client_info_fops = { 2440 .open = client_info_open, 2441 .read = seq_read, 2442 .llseek = seq_lseek, 2443 .release = single_release, 2444 }; 2445 2446 static void *states_start(struct seq_file *s, loff_t *pos) 2447 __acquires(&clp->cl_lock) 2448 { 2449 struct nfs4_client *clp = s->private; 2450 unsigned long id = *pos; 2451 void *ret; 2452 2453 spin_lock(&clp->cl_lock); 2454 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2455 *pos = id; 2456 return ret; 2457 } 2458 2459 static void *states_next(struct seq_file *s, void *v, loff_t *pos) 2460 { 2461 struct nfs4_client *clp = s->private; 2462 unsigned long id; 2463 void *ret; 2464 2465 id = *pos; 2466 id++; 2467 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2468 *pos = id; 2469 return ret; 2470 } 2471 2472 static void states_stop(struct seq_file *s, void *v) 2473 __releases(&clp->cl_lock) 2474 { 2475 struct nfs4_client *clp = s->private; 2476 2477 spin_unlock(&clp->cl_lock); 2478 } 2479 2480 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) 2481 { 2482 seq_printf(s, "filename: \"%pD2\"", f->nf_file); 2483 } 2484 2485 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) 2486 { 2487 struct inode *inode = f->nf_inode; 2488 2489 seq_printf(s, "superblock: \"%02x:%02x:%ld\"", 2490 MAJOR(inode->i_sb->s_dev), 2491 MINOR(inode->i_sb->s_dev), 2492 inode->i_ino); 2493 } 2494 2495 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) 2496 { 2497 seq_printf(s, "owner: "); 2498 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); 2499 } 2500 2501 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) 2502 { 2503 seq_printf(s, "0x%.8x", stid->si_generation); 2504 seq_printf(s, "%12phN", &stid->si_opaque); 2505 } 2506 2507 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) 2508 { 2509 struct nfs4_ol_stateid *ols; 2510 struct nfs4_file *nf; 2511 struct nfsd_file *file; 2512 struct nfs4_stateowner *oo; 2513 unsigned int access, deny; 2514 2515 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID) 2516 return 0; /* XXX: or SEQ_SKIP? 
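* Either would do here: nothing has been emitted for this entry yet, so returning 0 produces the same empty output that SEQ_SKIP would.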
*/ 2517 ols = openlockstateid(st); 2518 oo = ols->st_stateowner; 2519 nf = st->sc_file; 2520 file = find_any_file(nf); 2521 if (!file) 2522 return 0; 2523 2524 seq_printf(s, "- "); 2525 nfs4_show_stateid(s, &st->sc_stateid); 2526 seq_printf(s, ": { type: open, "); 2527 2528 access = bmap_to_share_mode(ols->st_access_bmap); 2529 deny = bmap_to_share_mode(ols->st_deny_bmap); 2530 2531 seq_printf(s, "access: %s%s, ", 2532 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2533 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2534 seq_printf(s, "deny: %s%s, ", 2535 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2536 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2537 2538 nfs4_show_superblock(s, file); 2539 seq_printf(s, ", "); 2540 nfs4_show_fname(s, file); 2541 seq_printf(s, ", "); 2542 nfs4_show_owner(s, oo); 2543 seq_printf(s, " }\n"); 2544 nfsd_file_put(file); 2545 2546 return 0; 2547 } 2548 2549 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) 2550 { 2551 struct nfs4_ol_stateid *ols; 2552 struct nfs4_file *nf; 2553 struct nfsd_file *file; 2554 struct nfs4_stateowner *oo; 2555 2556 ols = openlockstateid(st); 2557 oo = ols->st_stateowner; 2558 nf = st->sc_file; 2559 file = find_any_file(nf); 2560 if (!file) 2561 return 0; 2562 2563 seq_printf(s, "- "); 2564 nfs4_show_stateid(s, &st->sc_stateid); 2565 seq_printf(s, ": { type: lock, "); 2566 2567 /* 2568 * Note: a lock stateid isn't really the same thing as a lock, 2569 * it's the locking state held by one owner on a file, and there 2570 * may be multiple (or no) lock ranges associated with it. 2571 * (The same is true of open stateids.) 2572 */ 2573 2574 nfs4_show_superblock(s, file); 2575 /* XXX: open stateid? */ 2576 seq_printf(s, ", "); 2577 nfs4_show_fname(s, file); 2578 seq_printf(s, ", "); 2579 nfs4_show_owner(s, oo); 2580 seq_printf(s, " }\n"); 2581 nfsd_file_put(file); 2582 2583 return 0; 2584 } 2585 2586 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) 2587 { 2588 struct nfs4_delegation *ds; 2589 struct nfs4_file *nf; 2590 struct nfsd_file *file; 2591 2592 ds = delegstateid(st); 2593 nf = st->sc_file; 2594 file = find_deleg_file(nf); 2595 if (!file) 2596 return 0; 2597 2598 seq_printf(s, "- "); 2599 nfs4_show_stateid(s, &st->sc_stateid); 2600 seq_printf(s, ": { type: deleg, "); 2601 2602 /* Kinda dead code as long as we only support read delegs: */ 2603 seq_printf(s, "access: %s, ", 2604 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w"); 2605 2606 /* XXX: lease time, whether it's being recalled. */ 2607 2608 nfs4_show_superblock(s, file); 2609 seq_printf(s, ", "); 2610 nfs4_show_fname(s, file); 2611 seq_printf(s, " }\n"); 2612 nfsd_file_put(file); 2613 2614 return 0; 2615 } 2616 2617 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) 2618 { 2619 struct nfs4_layout_stateid *ls; 2620 struct nfsd_file *file; 2621 2622 ls = container_of(st, struct nfs4_layout_stateid, ls_stid); 2623 file = ls->ls_file; 2624 2625 seq_printf(s, "- "); 2626 nfs4_show_stateid(s, &st->sc_stateid); 2627 seq_printf(s, ": { type: layout, "); 2628 2629 /* XXX: What else would be useful? 
*/ 2630 2631 nfs4_show_superblock(s, file); 2632 seq_printf(s, ", "); 2633 nfs4_show_fname(s, file); 2634 seq_printf(s, " }\n"); 2635 2636 return 0; 2637 } 2638 2639 static int states_show(struct seq_file *s, void *v) 2640 { 2641 struct nfs4_stid *st = v; 2642 2643 switch (st->sc_type) { 2644 case NFS4_OPEN_STID: 2645 return nfs4_show_open(s, st); 2646 case NFS4_LOCK_STID: 2647 return nfs4_show_lock(s, st); 2648 case NFS4_DELEG_STID: 2649 return nfs4_show_deleg(s, st); 2650 case NFS4_LAYOUT_STID: 2651 return nfs4_show_layout(s, st); 2652 default: 2653 return 0; /* XXX: or SEQ_SKIP? */ 2654 } 2655 /* XXX: copy stateids? */ 2656 } 2657 2658 static struct seq_operations states_seq_ops = { 2659 .start = states_start, 2660 .next = states_next, 2661 .stop = states_stop, 2662 .show = states_show 2663 }; 2664 2665 static int client_states_open(struct inode *inode, struct file *file) 2666 { 2667 struct seq_file *s; 2668 struct nfs4_client *clp; 2669 int ret; 2670 2671 clp = get_nfsdfs_clp(inode); 2672 if (!clp) 2673 return -ENXIO; 2674 2675 ret = seq_open(file, &states_seq_ops); 2676 if (ret) 2677 return ret; 2678 s = file->private_data; 2679 s->private = clp; 2680 return 0; 2681 } 2682 2683 static int client_opens_release(struct inode *inode, struct file *file) 2684 { 2685 struct seq_file *m = file->private_data; 2686 struct nfs4_client *clp = m->private; 2687 2688 /* XXX: alternatively, we could get/drop in seq start/stop */ 2689 drop_client(clp); 2690 return 0; 2691 } 2692 2693 static const struct file_operations client_states_fops = { 2694 .open = client_states_open, 2695 .read = seq_read, 2696 .llseek = seq_lseek, 2697 .release = client_opens_release, 2698 }; 2699 2700 /* 2701 * Normally we refuse to destroy clients that are in use, but here the 2702 * administrator is telling us to just do it. 
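* (The request arrives as a write of "expire" to clients/#/ctl; see client_ctl_write() below.)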
We also want to wait 2703 * so the caller has a guarantee that the client's locks are gone by 2704 * the time the write returns: 2705 */ 2706 static void force_expire_client(struct nfs4_client *clp) 2707 { 2708 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2709 bool already_expired; 2710 2711 trace_nfsd_clid_admin_expired(&clp->cl_clientid); 2712 2713 spin_lock(&nn->client_lock); 2714 clp->cl_time = 0; 2715 spin_unlock(&nn->client_lock); 2716 2717 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); 2718 spin_lock(&nn->client_lock); 2719 already_expired = list_empty(&clp->cl_lru); 2720 if (!already_expired) 2721 unhash_client_locked(clp); 2722 spin_unlock(&nn->client_lock); 2723 2724 if (!already_expired) 2725 expire_client(clp); 2726 else 2727 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); 2728 } 2729 2730 static ssize_t client_ctl_write(struct file *file, const char __user *buf, 2731 size_t size, loff_t *pos) 2732 { 2733 char *data; 2734 struct nfs4_client *clp; 2735 2736 data = simple_transaction_get(file, buf, size); 2737 if (IS_ERR(data)) 2738 return PTR_ERR(data); 2739 if (size != 7 || 0 != memcmp(data, "expire\n", 7)) 2740 return -EINVAL; 2741 clp = get_nfsdfs_clp(file_inode(file)); 2742 if (!clp) 2743 return -ENXIO; 2744 force_expire_client(clp); 2745 drop_client(clp); 2746 return 7; 2747 } 2748 2749 static const struct file_operations client_ctl_fops = { 2750 .write = client_ctl_write, 2751 .release = simple_transaction_release, 2752 }; 2753 2754 static const struct tree_descr client_files[] = { 2755 [0] = {"info", &client_info_fops, S_IRUSR}, 2756 [1] = {"states", &client_states_fops, S_IRUSR}, 2757 [2] = {"ctl", &client_ctl_fops, S_IWUSR}, 2758 [3] = {""}, 2759 }; 2760 2761 static struct nfs4_client *create_client(struct xdr_netobj name, 2762 struct svc_rqst *rqstp, nfs4_verifier *verf) 2763 { 2764 struct nfs4_client *clp; 2765 struct sockaddr *sa = svc_addr(rqstp); 2766 int ret; 2767 struct net *net = SVC_NET(rqstp); 2768 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2769 struct dentry *dentries[ARRAY_SIZE(client_files)]; 2770 2771 clp = alloc_client(name); 2772 if (clp == NULL) 2773 return NULL; 2774 2775 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 2776 if (ret) { 2777 free_client(clp); 2778 return NULL; 2779 } 2780 gen_clid(clp, nn); 2781 kref_init(&clp->cl_nfsdfs.cl_ref); 2782 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 2783 clp->cl_time = ktime_get_boottime_seconds(); 2784 clear_bit(0, &clp->cl_cb_slot_busy); 2785 copy_verf(clp, verf); 2786 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); 2787 clp->cl_cb_session = NULL; 2788 clp->net = net; 2789 clp->cl_nfsd_dentry = nfsd_client_mkdir( 2790 nn, &clp->cl_nfsdfs, 2791 clp->cl_clientid.cl_id - nn->clientid_base, 2792 client_files, dentries); 2793 clp->cl_nfsd_info_dentry = dentries[0]; 2794 if (!clp->cl_nfsd_dentry) { 2795 free_client(clp); 2796 return NULL; 2797 } 2798 return clp; 2799 } 2800 2801 static void 2802 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 2803 { 2804 struct rb_node **new = &(root->rb_node), *parent = NULL; 2805 struct nfs4_client *clp; 2806 2807 while (*new) { 2808 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 2809 parent = *new; 2810 2811 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 2812 new = &((*new)->rb_left); 2813 else 2814 new = &((*new)->rb_right); 2815 } 2816 2817 rb_link_node(&new_clp->cl_namenode, parent, new); 2818 rb_insert_color(&new_clp->cl_namenode, root); 2819 } 2820 2821 
static struct nfs4_client * 2822 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 2823 { 2824 int cmp; 2825 struct rb_node *node = root->rb_node; 2826 struct nfs4_client *clp; 2827 2828 while (node) { 2829 clp = rb_entry(node, struct nfs4_client, cl_namenode); 2830 cmp = compare_blob(&clp->cl_name, name); 2831 if (cmp > 0) 2832 node = node->rb_left; 2833 else if (cmp < 0) 2834 node = node->rb_right; 2835 else 2836 return clp; 2837 } 2838 return NULL; 2839 } 2840 2841 static void 2842 add_to_unconfirmed(struct nfs4_client *clp) 2843 { 2844 unsigned int idhashval; 2845 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2846 2847 lockdep_assert_held(&nn->client_lock); 2848 2849 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2850 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 2851 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2852 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 2853 renew_client_locked(clp); 2854 } 2855 2856 static void 2857 move_to_confirmed(struct nfs4_client *clp) 2858 { 2859 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2860 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2861 2862 lockdep_assert_held(&nn->client_lock); 2863 2864 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 2865 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2866 add_clp_to_name_tree(clp, &nn->conf_name_tree); 2867 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2868 trace_nfsd_clid_confirmed(&clp->cl_clientid); 2869 renew_client_locked(clp); 2870 } 2871 2872 static struct nfs4_client * 2873 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 2874 { 2875 struct nfs4_client *clp; 2876 unsigned int idhashval = clientid_hashval(clid->cl_id); 2877 2878 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 2879 if (same_clid(&clp->cl_clientid, clid)) { 2880 if ((bool)clp->cl_minorversion != sessions) 2881 return NULL; 2882 renew_client_locked(clp); 2883 return clp; 2884 } 2885 } 2886 return NULL; 2887 } 2888 2889 static struct nfs4_client * 2890 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2891 { 2892 struct list_head *tbl = nn->conf_id_hashtbl; 2893 2894 lockdep_assert_held(&nn->client_lock); 2895 return find_client_in_id_table(tbl, clid, sessions); 2896 } 2897 2898 static struct nfs4_client * 2899 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2900 { 2901 struct list_head *tbl = nn->unconf_id_hashtbl; 2902 2903 lockdep_assert_held(&nn->client_lock); 2904 return find_client_in_id_table(tbl, clid, sessions); 2905 } 2906 2907 static bool clp_used_exchangeid(struct nfs4_client *clp) 2908 { 2909 return clp->cl_exchange_flags != 0; 2910 } 2911 2912 static struct nfs4_client * 2913 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2914 { 2915 lockdep_assert_held(&nn->client_lock); 2916 return find_clp_in_name_tree(name, &nn->conf_name_tree); 2917 } 2918 2919 static struct nfs4_client * 2920 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2921 { 2922 lockdep_assert_held(&nn->client_lock); 2923 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 2924 } 2925 2926 static void 2927 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 2928 { 2929 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 2930 struct sockaddr *sa = svc_addr(rqstp); 2931 u32 scopeid = rpc_get_scope_id(sa); 2932 unsigned short expected_family; 2933 2934 /* 
Currently, we only support tcp and tcp6 for the callback channel */ 2935 if (se->se_callback_netid_len == 3 && 2936 !memcmp(se->se_callback_netid_val, "tcp", 3)) 2937 expected_family = AF_INET; 2938 else if (se->se_callback_netid_len == 4 && 2939 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 2940 expected_family = AF_INET6; 2941 else 2942 goto out_err; 2943 2944 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 2945 se->se_callback_addr_len, 2946 (struct sockaddr *)&conn->cb_addr, 2947 sizeof(conn->cb_addr)); 2948 2949 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 2950 goto out_err; 2951 2952 if (conn->cb_addr.ss_family == AF_INET6) 2953 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 2954 2955 conn->cb_prog = se->se_callback_prog; 2956 conn->cb_ident = se->se_callback_ident; 2957 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 2958 trace_nfsd_cb_args(clp, conn); 2959 return; 2960 out_err: 2961 conn->cb_addr.ss_family = AF_UNSPEC; 2962 conn->cb_addrlen = 0; 2963 trace_nfsd_cb_nodelegs(clp); 2964 return; 2965 } 2966 2967 /* 2968 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 2969 */ 2970 static void 2971 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 2972 { 2973 struct xdr_buf *buf = resp->xdr->buf; 2974 struct nfsd4_slot *slot = resp->cstate.slot; 2975 unsigned int base; 2976 2977 dprintk("--> %s slot %p\n", __func__, slot); 2978 2979 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 2980 slot->sl_opcnt = resp->opcnt; 2981 slot->sl_status = resp->cstate.status; 2982 free_svc_cred(&slot->sl_cred); 2983 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); 2984 2985 if (!nfsd4_cache_this(resp)) { 2986 slot->sl_flags &= ~NFSD4_SLOT_CACHED; 2987 return; 2988 } 2989 slot->sl_flags |= NFSD4_SLOT_CACHED; 2990 2991 base = resp->cstate.data_offset; 2992 slot->sl_datalen = buf->len - base; 2993 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 2994 WARN(1, "%s: sessions DRC could not cache compound\n", 2995 __func__); 2996 return; 2997 } 2998 2999 /* 3000 * Encode the replay sequence operation from the slot values. 3001 * If cachethis is FALSE, encode the uncached-reply error on the next 3002 * operation; that encoding step sets resp->p and increments resp->opcnt 3003 * for nfs4svc_encode_compoundres. 3004 * 3005 */ 3006 static __be32 3007 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 3008 struct nfsd4_compoundres *resp) 3009 { 3010 struct nfsd4_op *op; 3011 struct nfsd4_slot *slot = resp->cstate.slot; 3012 3013 /* Encode the replayed sequence operation */ 3014 op = &args->ops[resp->opcnt - 1]; 3015 nfsd4_encode_operation(resp, op); 3016 3017 if (slot->sl_flags & NFSD4_SLOT_CACHED) 3018 return op->status; 3019 if (args->opcnt == 1) { 3020 /* 3021 * The original operation wasn't a solo sequence--we 3022 * always cache those--so this retry must not match the 3023 * original: 3024 */ 3025 op->status = nfserr_seq_false_retry; 3026 } else { 3027 op = &args->ops[resp->opcnt++]; 3028 op->status = nfserr_retry_uncached_rep; 3029 nfsd4_encode_operation(resp, op); 3030 } 3031 return op->status; 3032 } 3033 3034 /* 3035 * The sequence operation is not cached because we can use the slot and 3036 * session values. 
3037 */ 3038 static __be32 3039 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 3040 struct nfsd4_sequence *seq) 3041 { 3042 struct nfsd4_slot *slot = resp->cstate.slot; 3043 struct xdr_stream *xdr = resp->xdr; 3044 __be32 *p; 3045 __be32 status; 3046 3047 dprintk("--> %s slot %p\n", __func__, slot); 3048 3049 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 3050 if (status) 3051 return status; 3052 3053 p = xdr_reserve_space(xdr, slot->sl_datalen); 3054 if (!p) { 3055 WARN_ON_ONCE(1); 3056 return nfserr_serverfault; 3057 } 3058 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 3059 xdr_commit_encode(xdr); 3060 3061 resp->opcnt = slot->sl_opcnt; 3062 return slot->sl_status; 3063 } 3064 3065 /* 3066 * Set the exchange_id flags returned by the server. 3067 */ 3068 static void 3069 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 3070 { 3071 #ifdef CONFIG_NFSD_PNFS 3072 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 3073 #else 3074 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 3075 #endif 3076 3077 /* Referrals are supported, Migration is not. */ 3078 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 3079 3080 /* set the wire flags to return to client. */ 3081 clid->flags = new->cl_exchange_flags; 3082 } 3083 3084 static bool client_has_openowners(struct nfs4_client *clp) 3085 { 3086 struct nfs4_openowner *oo; 3087 3088 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { 3089 if (!list_empty(&oo->oo_owner.so_stateids)) 3090 return true; 3091 } 3092 return false; 3093 } 3094 3095 static bool client_has_state(struct nfs4_client *clp) 3096 { 3097 return client_has_openowners(clp) 3098 #ifdef CONFIG_NFSD_PNFS 3099 || !list_empty(&clp->cl_lo_states) 3100 #endif 3101 || !list_empty(&clp->cl_delegations) 3102 || !list_empty(&clp->cl_sessions) 3103 || !list_empty(&clp->async_copies); 3104 } 3105 3106 static __be32 copy_impl_id(struct nfs4_client *clp, 3107 struct nfsd4_exchange_id *exid) 3108 { 3109 if (!exid->nii_domain.data) 3110 return 0; 3111 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); 3112 if (!clp->cl_nii_domain.data) 3113 return nfserr_jukebox; 3114 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); 3115 if (!clp->cl_nii_name.data) 3116 return nfserr_jukebox; 3117 clp->cl_nii_time = exid->nii_time; 3118 return 0; 3119 } 3120 3121 __be32 3122 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3123 union nfsd4_op_u *u) 3124 { 3125 struct nfsd4_exchange_id *exid = &u->exchange_id; 3126 struct nfs4_client *conf, *new; 3127 struct nfs4_client *unconf = NULL; 3128 __be32 status; 3129 char addr_str[INET6_ADDRSTRLEN]; 3130 nfs4_verifier verf = exid->verifier; 3131 struct sockaddr *sa = svc_addr(rqstp); 3132 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 3133 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3134 3135 rpc_ntop(sa, addr_str, sizeof(addr_str)); 3136 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 3137 "ip_addr=%s flags %x, spa_how %u\n", 3138 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 3139 addr_str, exid->flags, exid->spa_how); 3140 3141 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 3142 return nfserr_inval; 3143 3144 new = create_client(exid->clname, rqstp, &verf); 3145 if (new == NULL) 3146 return nfserr_jukebox; 3147 status = copy_impl_id(new, exid); 3148 if (status) 3149 goto out_nolock; 3150 3151 switch (exid->spa_how) { 3152 case SP4_MACH_CRED: 3153 
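/* Note: spo_must_enforce/spo_must_allow are pairs of 32-bit bitmap words; word [0] covers ops 0-31 and word [1] ops 32-63, hence the "- 32" when setting bits for these session-management ops: */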
exid->spo_must_enforce[0] = 0; 3154 exid->spo_must_enforce[1] = ( 3155 1 << (OP_BIND_CONN_TO_SESSION - 32) | 3156 1 << (OP_EXCHANGE_ID - 32) | 3157 1 << (OP_CREATE_SESSION - 32) | 3158 1 << (OP_DESTROY_SESSION - 32) | 3159 1 << (OP_DESTROY_CLIENTID - 32)); 3160 3161 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 3162 1 << (OP_OPEN_DOWNGRADE) | 3163 1 << (OP_LOCKU) | 3164 1 << (OP_DELEGRETURN)); 3165 3166 exid->spo_must_allow[1] &= ( 3167 1 << (OP_TEST_STATEID - 32) | 3168 1 << (OP_FREE_STATEID - 32)); 3169 if (!svc_rqst_integrity_protected(rqstp)) { 3170 status = nfserr_inval; 3171 goto out_nolock; 3172 } 3173 /* 3174 * Sometimes userspace doesn't give us a principal. 3175 * Which is a bug, really. Anyway, we can't enforce 3176 * MACH_CRED in that case, better to give up now: 3177 */ 3178 if (!new->cl_cred.cr_principal && 3179 !new->cl_cred.cr_raw_principal) { 3180 status = nfserr_serverfault; 3181 goto out_nolock; 3182 } 3183 new->cl_mach_cred = true; 3184 break; 3185 case SP4_NONE: 3186 break; 3187 default: /* checked by xdr code */ 3188 WARN_ON_ONCE(1); 3189 fallthrough; 3190 case SP4_SSV: 3191 status = nfserr_encr_alg_unsupp; 3192 goto out_nolock; 3193 } 3194 3195 /* Cases below refer to rfc 5661 section 18.35.4: */ 3196 spin_lock(&nn->client_lock); 3197 conf = find_confirmed_client_by_name(&exid->clname, nn); 3198 if (conf) { 3199 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 3200 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 3201 3202 if (update) { 3203 if (!clp_used_exchangeid(conf)) { /* buggy client */ 3204 status = nfserr_inval; 3205 goto out; 3206 } 3207 if (!nfsd4_mach_creds_match(conf, rqstp)) { 3208 status = nfserr_wrong_cred; 3209 goto out; 3210 } 3211 if (!creds_match) { /* case 9 */ 3212 status = nfserr_perm; 3213 goto out; 3214 } 3215 if (!verfs_match) { /* case 8 */ 3216 status = nfserr_not_same; 3217 goto out; 3218 } 3219 /* case 6 */ 3220 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 3221 trace_nfsd_clid_confirmed_r(conf); 3222 goto out_copy; 3223 } 3224 if (!creds_match) { /* case 3 */ 3225 if (client_has_state(conf)) { 3226 status = nfserr_clid_inuse; 3227 trace_nfsd_clid_cred_mismatch(conf, rqstp); 3228 goto out; 3229 } 3230 goto out_new; 3231 } 3232 if (verfs_match) { /* case 2 */ 3233 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 3234 trace_nfsd_clid_confirmed_r(conf); 3235 goto out_copy; 3236 } 3237 /* case 5, client reboot */ 3238 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf); 3239 conf = NULL; 3240 goto out_new; 3241 } 3242 3243 if (update) { /* case 7 */ 3244 status = nfserr_noent; 3245 goto out; 3246 } 3247 3248 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 3249 if (unconf) /* case 4, possible retry or client restart */ 3250 unhash_client_locked(unconf); 3251 3252 /* case 1, new owner ID */ 3253 trace_nfsd_clid_fresh(new); 3254 3255 out_new: 3256 if (conf) { 3257 status = mark_client_expired_locked(conf); 3258 if (status) 3259 goto out; 3260 trace_nfsd_clid_replaced(&conf->cl_clientid); 3261 } 3262 new->cl_minorversion = cstate->minorversion; 3263 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; 3264 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; 3265 3266 add_to_unconfirmed(new); 3267 swap(new, conf); 3268 out_copy: 3269 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 3270 exid->clientid.cl_id = conf->cl_clientid.cl_id; 3271 3272 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 3273 nfsd4_set_ex_flags(conf, exid); 3274 3275 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 3276 
conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 3277 status = nfs_ok; 3278 3279 out: 3280 spin_unlock(&nn->client_lock); 3281 out_nolock: 3282 if (new) 3283 expire_client(new); 3284 if (unconf) { 3285 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 3286 expire_client(unconf); 3287 } 3288 return status; 3289 } 3290 3291 static __be32 3292 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 3293 { 3294 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 3295 slot_seqid); 3296 3297 /* The slot is in use, and no response has been sent. */ 3298 if (slot_inuse) { 3299 if (seqid == slot_seqid) 3300 return nfserr_jukebox; 3301 else 3302 return nfserr_seq_misordered; 3303 } 3304 /* Note unsigned 32-bit arithmetic handles wraparound: */ 3305 if (likely(seqid == slot_seqid + 1)) 3306 return nfs_ok; 3307 if (seqid == slot_seqid) 3308 return nfserr_replay_cache; 3309 return nfserr_seq_misordered; 3310 } 3311 3312 /* 3313 * Cache the create session result into the create session single DRC 3314 * slot cache by saving the xdr structure. sl_seqid has been set. 3315 * Do this for solo or embedded create session operations. 3316 */ 3317 static void 3318 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 3319 struct nfsd4_clid_slot *slot, __be32 nfserr) 3320 { 3321 slot->sl_status = nfserr; 3322 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 3323 } 3324 3325 static __be32 3326 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 3327 struct nfsd4_clid_slot *slot) 3328 { 3329 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 3330 return slot->sl_status; 3331 } 3332 3333 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 3334 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 3335 1 + /* MIN tag is length with zero, only length */ \ 3336 3 + /* version, opcount, opcode */ \ 3337 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3338 /* seqid, slotID, slotID, cache */ \ 3339 4 ) * sizeof(__be32)) 3340 3341 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 3342 2 + /* verifier: AUTH_NULL, length 0 */\ 3343 1 + /* status */ \ 3344 1 + /* MIN tag is length with zero, only length */ \ 3345 3 + /* opcount, opcode, opstatus*/ \ 3346 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3347 /* seqid, slotID, slotID, slotID, status */ \ 3348 5 ) * sizeof(__be32)) 3349 3350 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 3351 { 3352 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 3353 3354 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 3355 return nfserr_toosmall; 3356 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 3357 return nfserr_toosmall; 3358 ca->headerpadsz = 0; 3359 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 3360 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 3361 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 3362 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 3363 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 3364 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 3365 /* 3366 * Note decreasing slot size below client's request may make it 3367 * difficult for client to function correctly, whereas 3368 * decreasing the number of slots will (just?) affect 3369 * performance. When short on memory we therefore prefer to 3370 * decrease number of slots instead of their size. Clients that 3371 * request larger slots than they need will get poor results: 3372 * Note that we always allow at least one slot, because our 3373 * accounting is soft and provides no guarantees either way. 
3374 */ 3375 ca->maxreqs = nfsd4_get_drc_mem(ca, nn); 3376 3377 return nfs_ok; 3378 } 3379 3380 /* 3381 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. 3382 * These are based on similar macros in linux/sunrpc/msg_prot.h . 3383 */ 3384 #define RPC_MAX_HEADER_WITH_AUTH_SYS \ 3385 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) 3386 3387 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ 3388 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) 3389 3390 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 3391 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) 3392 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 3393 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ 3394 sizeof(__be32)) 3395 3396 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 3397 { 3398 ca->headerpadsz = 0; 3399 3400 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 3401 return nfserr_toosmall; 3402 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 3403 return nfserr_toosmall; 3404 ca->maxresp_cached = 0; 3405 if (ca->maxops < 2) 3406 return nfserr_toosmall; 3407 3408 return nfs_ok; 3409 } 3410 3411 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 3412 { 3413 switch (cbs->flavor) { 3414 case RPC_AUTH_NULL: 3415 case RPC_AUTH_UNIX: 3416 return nfs_ok; 3417 default: 3418 /* 3419 * GSS case: the spec doesn't allow us to return this 3420 * error. But it also doesn't allow us not to support 3421 * GSS. 3422 * I'd rather this fail hard than return some error the 3423 * client might think it can already handle: 3424 */ 3425 return nfserr_encr_alg_unsupp; 3426 } 3427 } 3428 3429 __be32 3430 nfsd4_create_session(struct svc_rqst *rqstp, 3431 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3432 { 3433 struct nfsd4_create_session *cr_ses = &u->create_session; 3434 struct sockaddr *sa = svc_addr(rqstp); 3435 struct nfs4_client *conf, *unconf; 3436 struct nfs4_client *old = NULL; 3437 struct nfsd4_session *new; 3438 struct nfsd4_conn *conn; 3439 struct nfsd4_clid_slot *cs_slot = NULL; 3440 __be32 status = 0; 3441 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3442 3443 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 3444 return nfserr_inval; 3445 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 3446 if (status) 3447 return status; 3448 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 3449 if (status) 3450 return status; 3451 status = check_backchannel_attrs(&cr_ses->back_channel); 3452 if (status) 3453 goto out_release_drc_mem; 3454 status = nfserr_jukebox; 3455 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 3456 if (!new) 3457 goto out_release_drc_mem; 3458 conn = alloc_conn_from_crses(rqstp, cr_ses); 3459 if (!conn) 3460 goto out_free_session; 3461 3462 spin_lock(&nn->client_lock); 3463 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 3464 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 3465 WARN_ON_ONCE(conf && unconf); 3466 3467 if (conf) { 3468 status = nfserr_wrong_cred; 3469 if (!nfsd4_mach_creds_match(conf, rqstp)) 3470 goto out_free_conn; 3471 cs_slot = &conf->cl_cs_slot; 3472 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3473 if (status) { 3474 if (status == nfserr_replay_cache) 3475 status = nfsd4_replay_create_session(cr_ses, cs_slot); 3476 goto out_free_conn; 3477 } 3478 } else if (unconf) { 3479 status = nfserr_clid_inuse; 3480 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 3481 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 3482 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 3483 goto out_free_conn; 3484 } 3485 
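/* Reaching here, the creds and address matched the unconfirmed record; machine-cred enforcement still applies: */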
status = nfserr_wrong_cred; 3486 if (!nfsd4_mach_creds_match(unconf, rqstp)) 3487 goto out_free_conn; 3488 cs_slot = &unconf->cl_cs_slot; 3489 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3490 if (status) { 3491 /* an unconfirmed replay returns misordered */ 3492 status = nfserr_seq_misordered; 3493 goto out_free_conn; 3494 } 3495 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 3496 if (old) { 3497 status = mark_client_expired_locked(old); 3498 if (status) { 3499 old = NULL; 3500 goto out_free_conn; 3501 } 3502 trace_nfsd_clid_replaced(&old->cl_clientid); 3503 } 3504 move_to_confirmed(unconf); 3505 conf = unconf; 3506 } else { 3507 status = nfserr_stale_clientid; 3508 goto out_free_conn; 3509 } 3510 status = nfs_ok; 3511 /* Persistent sessions are not supported */ 3512 cr_ses->flags &= ~SESSION4_PERSIST; 3513 /* Upshifting from TCP to RDMA is not supported */ 3514 cr_ses->flags &= ~SESSION4_RDMA; 3515 3516 init_session(rqstp, new, conf, cr_ses); 3517 nfsd4_get_session_locked(new); 3518 3519 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 3520 NFS4_MAX_SESSIONID_LEN); 3521 cs_slot->sl_seqid++; 3522 cr_ses->seqid = cs_slot->sl_seqid; 3523 3524 /* cache solo and embedded create sessions under the client_lock */ 3525 nfsd4_cache_create_session(cr_ses, cs_slot, status); 3526 spin_unlock(&nn->client_lock); 3527 if (conf == unconf) 3528 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 3529 /* init connection and backchannel */ 3530 nfsd4_init_conn(rqstp, conn, new); 3531 nfsd4_put_session(new); 3532 if (old) 3533 expire_client(old); 3534 return status; 3535 out_free_conn: 3536 spin_unlock(&nn->client_lock); 3537 free_conn(conn); 3538 if (old) 3539 expire_client(old); 3540 out_free_session: 3541 __free_session(new); 3542 out_release_drc_mem: 3543 nfsd4_put_drc_mem(&cr_ses->fore_channel); 3544 return status; 3545 } 3546 3547 static __be32 nfsd4_map_bcts_dir(u32 *dir) 3548 { 3549 switch (*dir) { 3550 case NFS4_CDFC4_FORE: 3551 case NFS4_CDFC4_BACK: 3552 return nfs_ok; 3553 case NFS4_CDFC4_FORE_OR_BOTH: 3554 case NFS4_CDFC4_BACK_OR_BOTH: 3555 *dir = NFS4_CDFC4_BOTH; 3556 return nfs_ok; 3557 } 3558 return nfserr_inval; 3559 } 3560 3561 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, 3562 struct nfsd4_compound_state *cstate, 3563 union nfsd4_op_u *u) 3564 { 3565 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; 3566 struct nfsd4_session *session = cstate->session; 3567 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3568 __be32 status; 3569 3570 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 3571 if (status) 3572 return status; 3573 spin_lock(&nn->client_lock); 3574 session->se_cb_prog = bc->bc_cb_program; 3575 session->se_cb_sec = bc->bc_cb_sec; 3576 spin_unlock(&nn->client_lock); 3577 3578 nfsd4_probe_callback(session->se_client); 3579 3580 return nfs_ok; 3581 } 3582 3583 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 3584 { 3585 struct nfsd4_conn *c; 3586 3587 list_for_each_entry(c, &s->se_conns, cn_persession) { 3588 if (c->cn_xprt == xpt) { 3589 return c; 3590 } 3591 } 3592 return NULL; 3593 } 3594 3595 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, 3596 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) 3597 { 3598 struct nfs4_client *clp = session->se_client; 3599 struct svc_xprt *xpt = rqst->rq_xprt; 3600 struct nfsd4_conn *c; 3601 __be32 status; 3602 3603 /* Following the last paragraph of RFC 5661 Section 18.34.3: */ 3604 
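/* In short: an exact cn_flags match succeeds; NFS4_CDFC4_FORE_OR_BOTH also accepts any connection that is not back-channel-only, and NFS4_CDFC4_BACK_OR_BOTH any that is not fore-channel-only; anything else is nfserr_inval. */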
spin_lock(&clp->cl_lock); 3605 c = __nfsd4_find_conn(xpt, session); 3606 if (!c) 3607 status = nfserr_noent; 3608 else if (req == c->cn_flags) 3609 status = nfs_ok; 3610 else if (req == NFS4_CDFC4_FORE_OR_BOTH && 3611 c->cn_flags != NFS4_CDFC4_BACK) 3612 status = nfs_ok; 3613 else if (req == NFS4_CDFC4_BACK_OR_BOTH && 3614 c->cn_flags != NFS4_CDFC4_FORE) 3615 status = nfs_ok; 3616 else 3617 status = nfserr_inval; 3618 spin_unlock(&clp->cl_lock); 3619 if (status == nfs_ok && conn) 3620 *conn = c; 3621 return status; 3622 } 3623 3624 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 3625 struct nfsd4_compound_state *cstate, 3626 union nfsd4_op_u *u) 3627 { 3628 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; 3629 __be32 status; 3630 struct nfsd4_conn *conn; 3631 struct nfsd4_session *session; 3632 struct net *net = SVC_NET(rqstp); 3633 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3634 3635 if (!nfsd4_last_compound_op(rqstp)) 3636 return nfserr_not_only_op; 3637 spin_lock(&nn->client_lock); 3638 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 3639 spin_unlock(&nn->client_lock); 3640 if (!session) 3641 goto out_no_session; 3642 status = nfserr_wrong_cred; 3643 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 3644 goto out; 3645 status = nfsd4_match_existing_connection(rqstp, session, 3646 bcts->dir, &conn); 3647 if (status == nfs_ok) { 3648 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || 3649 bcts->dir == NFS4_CDFC4_BACK) 3650 conn->cn_flags |= NFS4_CDFC4_BACK; 3651 nfsd4_probe_callback(session->se_client); 3652 goto out; 3653 } 3654 if (status == nfserr_inval) 3655 goto out; 3656 status = nfsd4_map_bcts_dir(&bcts->dir); 3657 if (status) 3658 goto out; 3659 conn = alloc_conn(rqstp, bcts->dir); 3660 status = nfserr_jukebox; 3661 if (!conn) 3662 goto out; 3663 nfsd4_init_conn(rqstp, conn, session); 3664 status = nfs_ok; 3665 out: 3666 nfsd4_put_session(session); 3667 out_no_session: 3668 return status; 3669 } 3670 3671 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid) 3672 { 3673 if (!cstate->session) 3674 return false; 3675 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); 3676 } 3677 3678 __be32 3679 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, 3680 union nfsd4_op_u *u) 3681 { 3682 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; 3683 struct nfsd4_session *ses; 3684 __be32 status; 3685 int ref_held_by_me = 0; 3686 struct net *net = SVC_NET(r); 3687 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3688 3689 status = nfserr_not_only_op; 3690 if (nfsd4_compound_in_session(cstate, sessionid)) { 3691 if (!nfsd4_last_compound_op(r)) 3692 goto out; 3693 ref_held_by_me++; 3694 } 3695 dump_sessionid(__func__, sessionid); 3696 spin_lock(&nn->client_lock); 3697 ses = find_in_sessionid_hashtbl(sessionid, net, &status); 3698 if (!ses) 3699 goto out_client_lock; 3700 status = nfserr_wrong_cred; 3701 if (!nfsd4_mach_creds_match(ses->se_client, r)) 3702 goto out_put_session; 3703 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 3704 if (status) 3705 goto out_put_session; 3706 unhash_session(ses); 3707 spin_unlock(&nn->client_lock); 3708 3709 nfsd4_probe_callback_sync(ses->se_client); 3710 3711 spin_lock(&nn->client_lock); 3712 status = nfs_ok; 3713 out_put_session: 3714 nfsd4_put_session_locked(ses); 3715 out_client_lock: 3716 spin_unlock(&nn->client_lock); 3717 out: 3718 return status; 3719 } 3720 3721 static __be32 
nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 3722 { 3723 struct nfs4_client *clp = ses->se_client; 3724 struct nfsd4_conn *c; 3725 __be32 status = nfs_ok; 3726 int ret; 3727 3728 spin_lock(&clp->cl_lock); 3729 c = __nfsd4_find_conn(new->cn_xprt, ses); 3730 if (c) 3731 goto out_free; 3732 status = nfserr_conn_not_bound_to_session; 3733 if (clp->cl_mach_cred) 3734 goto out_free; 3735 __nfsd4_hash_conn(new, ses); 3736 spin_unlock(&clp->cl_lock); 3737 ret = nfsd4_register_conn(new); 3738 if (ret) 3739 /* oops; xprt is already down: */ 3740 nfsd4_conn_lost(&new->cn_xpt_user); 3741 return nfs_ok; 3742 out_free: 3743 spin_unlock(&clp->cl_lock); 3744 free_conn(new); 3745 return status; 3746 } 3747 3748 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 3749 { 3750 struct nfsd4_compoundargs *args = rqstp->rq_argp; 3751 3752 return args->opcnt > session->se_fchannel.maxops; 3753 } 3754 3755 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 3756 struct nfsd4_session *session) 3757 { 3758 struct xdr_buf *xb = &rqstp->rq_arg; 3759 3760 return xb->len > session->se_fchannel.maxreq_sz; 3761 } 3762 3763 static bool replay_matches_cache(struct svc_rqst *rqstp, 3764 struct nfsd4_sequence *seq, struct nfsd4_slot *slot) 3765 { 3766 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 3767 3768 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != 3769 (bool)seq->cachethis) 3770 return false; 3771 /* 3772 * If there's an error then the reply can have fewer ops than 3773 * the call. 3774 */ 3775 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) 3776 return false; 3777 /* 3778 * But if we cached a reply with *more* ops than the call you're 3779 * sending us now, then this new call is clearly not really a 3780 * replay of the old one: 3781 */ 3782 if (slot->sl_opcnt > argp->opcnt) 3783 return false; 3784 /* This is the only check explicitly called by spec: */ 3785 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) 3786 return false; 3787 /* 3788 * There may be more comparisons we could actually do, but the 3789 * spec doesn't require us to catch every case where the calls 3790 * don't match (that would require caching the call as well as 3791 * the reply), so we don't bother. 3792 */ 3793 return true; 3794 } 3795 3796 __be32 3797 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3798 union nfsd4_op_u *u) 3799 { 3800 struct nfsd4_sequence *seq = &u->sequence; 3801 struct nfsd4_compoundres *resp = rqstp->rq_resp; 3802 struct xdr_stream *xdr = resp->xdr; 3803 struct nfsd4_session *session; 3804 struct nfs4_client *clp; 3805 struct nfsd4_slot *slot; 3806 struct nfsd4_conn *conn; 3807 __be32 status; 3808 int buflen; 3809 struct net *net = SVC_NET(rqstp); 3810 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3811 3812 if (resp->opcnt != 1) 3813 return nfserr_sequence_pos; 3814 3815 /* 3816 * Will be either used or freed by nfsd4_sequence_check_conn 3817 * below. 
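* (It is freed there when the transport is already bound to the session or when a machine-cred client refuses new connections, and hashed and registered otherwise.)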
3818 */ 3819 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 3820 if (!conn) 3821 return nfserr_jukebox; 3822 3823 spin_lock(&nn->client_lock); 3824 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 3825 if (!session) 3826 goto out_no_session; 3827 clp = session->se_client; 3828 3829 status = nfserr_too_many_ops; 3830 if (nfsd4_session_too_many_ops(rqstp, session)) 3831 goto out_put_session; 3832 3833 status = nfserr_req_too_big; 3834 if (nfsd4_request_too_big(rqstp, session)) 3835 goto out_put_session; 3836 3837 status = nfserr_badslot; 3838 if (seq->slotid >= session->se_fchannel.maxreqs) 3839 goto out_put_session; 3840 3841 slot = session->se_slots[seq->slotid]; 3842 dprintk("%s: slotid %d\n", __func__, seq->slotid); 3843 3844 /* We do not negotiate the number of slots yet, so set the 3845 * maxslots to the session maxreqs, which is used to encode 3846 * sr_highest_slotid and sr_target_slotid */ 3847 seq->maxslots = session->se_fchannel.maxreqs; 3848 3849 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 3850 slot->sl_flags & NFSD4_SLOT_INUSE); 3851 if (status == nfserr_replay_cache) { 3852 status = nfserr_seq_misordered; 3853 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 3854 goto out_put_session; 3855 status = nfserr_seq_false_retry; 3856 if (!replay_matches_cache(rqstp, seq, slot)) 3857 goto out_put_session; 3858 cstate->slot = slot; 3859 cstate->session = session; 3860 cstate->clp = clp; 3861 /* Return the cached reply status and set cstate->status 3862 * for nfsd4_proc_compound processing */ 3863 status = nfsd4_replay_cache_entry(resp, seq); 3864 cstate->status = nfserr_replay_cache; 3865 goto out; 3866 } 3867 if (status) 3868 goto out_put_session; 3869 3870 status = nfsd4_sequence_check_conn(conn, session); 3871 conn = NULL; 3872 if (status) 3873 goto out_put_session; 3874 3875 buflen = (seq->cachethis) ? 3876 session->se_fchannel.maxresp_cached : 3877 session->se_fchannel.maxresp_sz; 3878 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 3879 nfserr_rep_too_big; 3880 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 3881 goto out_put_session; 3882 svc_reserve(rqstp, buflen); 3883 3884 status = nfs_ok; 3885 /* Success! 
bump slot seqid */ 3886 slot->sl_seqid = seq->seqid; 3887 slot->sl_flags |= NFSD4_SLOT_INUSE; 3888 if (seq->cachethis) 3889 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 3890 else 3891 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 3892 3893 cstate->slot = slot; 3894 cstate->session = session; 3895 cstate->clp = clp; 3896 3897 out: 3898 switch (clp->cl_cb_state) { 3899 case NFSD4_CB_DOWN: 3900 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 3901 break; 3902 case NFSD4_CB_FAULT: 3903 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 3904 break; 3905 default: 3906 seq->status_flags = 0; 3907 } 3908 if (!list_empty(&clp->cl_revoked)) 3909 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 3910 out_no_session: 3911 if (conn) 3912 free_conn(conn); 3913 spin_unlock(&nn->client_lock); 3914 return status; 3915 out_put_session: 3916 nfsd4_put_session_locked(session); 3917 goto out_no_session; 3918 } 3919 3920 void 3921 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 3922 { 3923 struct nfsd4_compound_state *cs = &resp->cstate; 3924 3925 if (nfsd4_has_session(cs)) { 3926 if (cs->status != nfserr_replay_cache) { 3927 nfsd4_store_cache_entry(resp); 3928 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 3929 } 3930 /* Drop session reference that was taken in nfsd4_sequence() */ 3931 nfsd4_put_session(cs->session); 3932 } else if (cs->clp) 3933 put_client_renew(cs->clp); 3934 } 3935 3936 __be32 3937 nfsd4_destroy_clientid(struct svc_rqst *rqstp, 3938 struct nfsd4_compound_state *cstate, 3939 union nfsd4_op_u *u) 3940 { 3941 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; 3942 struct nfs4_client *conf, *unconf; 3943 struct nfs4_client *clp = NULL; 3944 __be32 status = 0; 3945 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3946 3947 spin_lock(&nn->client_lock); 3948 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 3949 conf = find_confirmed_client(&dc->clientid, true, nn); 3950 WARN_ON_ONCE(conf && unconf); 3951 3952 if (conf) { 3953 if (client_has_state(conf)) { 3954 status = nfserr_clientid_busy; 3955 goto out; 3956 } 3957 status = mark_client_expired_locked(conf); 3958 if (status) 3959 goto out; 3960 clp = conf; 3961 } else if (unconf) 3962 clp = unconf; 3963 else { 3964 status = nfserr_stale_clientid; 3965 goto out; 3966 } 3967 if (!nfsd4_mach_creds_match(clp, rqstp)) { 3968 clp = NULL; 3969 status = nfserr_wrong_cred; 3970 goto out; 3971 } 3972 trace_nfsd_clid_destroyed(&clp->cl_clientid); 3973 unhash_client_locked(clp); 3974 out: 3975 spin_unlock(&nn->client_lock); 3976 if (clp) 3977 expire_client(clp); 3978 return status; 3979 } 3980 3981 __be32 3982 nfsd4_reclaim_complete(struct svc_rqst *rqstp, 3983 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3984 { 3985 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; 3986 struct nfs4_client *clp = cstate->clp; 3987 __be32 status = 0; 3988 3989 if (rc->rca_one_fs) { 3990 if (!cstate->current_fh.fh_dentry) 3991 return nfserr_nofilehandle; 3992 /* 3993 * We don't take advantage of the rca_one_fs case. 3994 * That's OK, it's optional, we can safely ignore it. 3995 */ 3996 return nfs_ok; 3997 } 3998 3999 status = nfserr_complete_already; 4000 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 4001 goto out; 4002 4003 status = nfserr_stale_clientid; 4004 if (is_client_expired(clp)) 4005 /* 4006 * The following error isn't really legal. 4007 * But we only get here if the client just explicitly 4008 * destroyed the client. 
Surely it no longer cares what 4009 * error it gets back on an operation for the dead 4010 * client. 4011 */ 4012 goto out; 4013 4014 status = nfs_ok; 4015 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); 4016 nfsd4_client_record_create(clp); 4017 inc_reclaim_complete(clp); 4018 out: 4019 return status; 4020 } 4021 4022 __be32 4023 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4024 union nfsd4_op_u *u) 4025 { 4026 struct nfsd4_setclientid *setclid = &u->setclientid; 4027 struct xdr_netobj clname = setclid->se_name; 4028 nfs4_verifier clverifier = setclid->se_verf; 4029 struct nfs4_client *conf, *new; 4030 struct nfs4_client *unconf = NULL; 4031 __be32 status; 4032 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4033 4034 new = create_client(clname, rqstp, &clverifier); 4035 if (new == NULL) 4036 return nfserr_jukebox; 4037 spin_lock(&nn->client_lock); 4038 conf = find_confirmed_client_by_name(&clname, nn); 4039 if (conf && client_has_state(conf)) { 4040 status = nfserr_clid_inuse; 4041 if (clp_used_exchangeid(conf)) 4042 goto out; 4043 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4044 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4045 goto out; 4046 } 4047 } 4048 unconf = find_unconfirmed_client_by_name(&clname, nn); 4049 if (unconf) 4050 unhash_client_locked(unconf); 4051 if (conf) { 4052 if (same_verf(&conf->cl_verifier, &clverifier)) { 4053 copy_clid(new, conf); 4054 gen_confirm(new, nn); 4055 } else 4056 trace_nfsd_clid_verf_mismatch(conf, rqstp, 4057 &clverifier); 4058 } else 4059 trace_nfsd_clid_fresh(new); 4060 new->cl_minorversion = 0; 4061 gen_callback(new, setclid, rqstp); 4062 add_to_unconfirmed(new); 4063 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 4064 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 4065 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 4066 new = NULL; 4067 status = nfs_ok; 4068 out: 4069 spin_unlock(&nn->client_lock); 4070 if (new) 4071 free_client(new); 4072 if (unconf) { 4073 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 4074 expire_client(unconf); 4075 } 4076 return status; 4077 } 4078 4079 __be32 4080 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 4081 struct nfsd4_compound_state *cstate, 4082 union nfsd4_op_u *u) 4083 { 4084 struct nfsd4_setclientid_confirm *setclientid_confirm = 4085 &u->setclientid_confirm; 4086 struct nfs4_client *conf, *unconf; 4087 struct nfs4_client *old = NULL; 4088 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 4089 clientid_t * clid = &setclientid_confirm->sc_clientid; 4090 __be32 status; 4091 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4092 4093 if (STALE_CLIENTID(clid, nn)) 4094 return nfserr_stale_clientid; 4095 4096 spin_lock(&nn->client_lock); 4097 conf = find_confirmed_client(clid, false, nn); 4098 unconf = find_unconfirmed_client(clid, false, nn); 4099 /* 4100 * We try hard to give out unique clientid's, so if we get an 4101 * attempt to confirm the same clientid with a different cred, 4102 * the client may be buggy; this should never happen. 
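	 * (Accepting a mismatched cred here would presumably let one
	 * principal confirm, and thereby take over, state set up by
	 * another.)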
4103 * 4104 * Nevertheless, RFC 7530 recommends INUSE for this case: 4105 */ 4106 status = nfserr_clid_inuse; 4107 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { 4108 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 4109 goto out; 4110 } 4111 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4112 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4113 goto out; 4114 } 4115 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 4116 if (conf && same_verf(&confirm, &conf->cl_confirm)) { 4117 status = nfs_ok; 4118 } else 4119 status = nfserr_stale_clientid; 4120 goto out; 4121 } 4122 status = nfs_ok; 4123 if (conf) { 4124 old = unconf; 4125 unhash_client_locked(old); 4126 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 4127 } else { 4128 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 4129 if (old) { 4130 status = nfserr_clid_inuse; 4131 if (client_has_state(old) 4132 && !same_creds(&unconf->cl_cred, 4133 &old->cl_cred)) 4134 goto out; 4135 status = mark_client_expired_locked(old); 4136 if (status) { 4137 old = NULL; 4138 goto out; 4139 } 4140 trace_nfsd_clid_replaced(&old->cl_clientid); 4141 } 4142 move_to_confirmed(unconf); 4143 conf = unconf; 4144 } 4145 get_client_locked(conf); 4146 spin_unlock(&nn->client_lock); 4147 if (conf == unconf) 4148 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 4149 nfsd4_probe_callback(conf); 4150 spin_lock(&nn->client_lock); 4151 put_client_renew_locked(conf); 4152 out: 4153 spin_unlock(&nn->client_lock); 4154 if (old) 4155 expire_client(old); 4156 return status; 4157 } 4158 4159 static struct nfs4_file *nfsd4_alloc_file(void) 4160 { 4161 return kmem_cache_alloc(file_slab, GFP_KERNEL); 4162 } 4163 4164 /* OPEN Share state helper functions */ 4165 static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval, 4166 struct nfs4_file *fp) 4167 { 4168 lockdep_assert_held(&state_lock); 4169 4170 refcount_set(&fp->fi_ref, 1); 4171 spin_lock_init(&fp->fi_lock); 4172 INIT_LIST_HEAD(&fp->fi_stateids); 4173 INIT_LIST_HEAD(&fp->fi_delegations); 4174 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 4175 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); 4176 fp->fi_deleg_file = NULL; 4177 fp->fi_had_conflict = false; 4178 fp->fi_share_deny = 0; 4179 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 4180 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 4181 fp->fi_aliased = false; 4182 fp->fi_inode = d_inode(fh->fh_dentry); 4183 #ifdef CONFIG_NFSD_PNFS 4184 INIT_LIST_HEAD(&fp->fi_lo_states); 4185 atomic_set(&fp->fi_lo_recalls, 0); 4186 #endif 4187 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); 4188 } 4189 4190 void 4191 nfsd4_free_slabs(void) 4192 { 4193 kmem_cache_destroy(client_slab); 4194 kmem_cache_destroy(openowner_slab); 4195 kmem_cache_destroy(lockowner_slab); 4196 kmem_cache_destroy(file_slab); 4197 kmem_cache_destroy(stateid_slab); 4198 kmem_cache_destroy(deleg_slab); 4199 kmem_cache_destroy(odstate_slab); 4200 } 4201 4202 int 4203 nfsd4_init_slabs(void) 4204 { 4205 client_slab = kmem_cache_create("nfsd4_clients", 4206 sizeof(struct nfs4_client), 0, 0, NULL); 4207 if (client_slab == NULL) 4208 goto out; 4209 openowner_slab = kmem_cache_create("nfsd4_openowners", 4210 sizeof(struct nfs4_openowner), 0, 0, NULL); 4211 if (openowner_slab == NULL) 4212 goto out_free_client_slab; 4213 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 4214 sizeof(struct nfs4_lockowner), 0, 0, NULL); 4215 if (lockowner_slab == NULL) 4216 goto out_free_openowner_slab; 4217 file_slab = kmem_cache_create("nfsd4_files", 4218 sizeof(struct 
nfs4_file), 0, 0, NULL); 4219 if (file_slab == NULL) 4220 goto out_free_lockowner_slab; 4221 stateid_slab = kmem_cache_create("nfsd4_stateids", 4222 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 4223 if (stateid_slab == NULL) 4224 goto out_free_file_slab; 4225 deleg_slab = kmem_cache_create("nfsd4_delegations", 4226 sizeof(struct nfs4_delegation), 0, 0, NULL); 4227 if (deleg_slab == NULL) 4228 goto out_free_stateid_slab; 4229 odstate_slab = kmem_cache_create("nfsd4_odstate", 4230 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); 4231 if (odstate_slab == NULL) 4232 goto out_free_deleg_slab; 4233 return 0; 4234 4235 out_free_deleg_slab: 4236 kmem_cache_destroy(deleg_slab); 4237 out_free_stateid_slab: 4238 kmem_cache_destroy(stateid_slab); 4239 out_free_file_slab: 4240 kmem_cache_destroy(file_slab); 4241 out_free_lockowner_slab: 4242 kmem_cache_destroy(lockowner_slab); 4243 out_free_openowner_slab: 4244 kmem_cache_destroy(openowner_slab); 4245 out_free_client_slab: 4246 kmem_cache_destroy(client_slab); 4247 out: 4248 return -ENOMEM; 4249 } 4250 4251 static void init_nfs4_replay(struct nfs4_replay *rp) 4252 { 4253 rp->rp_status = nfserr_serverfault; 4254 rp->rp_buflen = 0; 4255 rp->rp_buf = rp->rp_ibuf; 4256 mutex_init(&rp->rp_mutex); 4257 } 4258 4259 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 4260 struct nfs4_stateowner *so) 4261 { 4262 if (!nfsd4_has_session(cstate)) { 4263 mutex_lock(&so->so_replay.rp_mutex); 4264 cstate->replay_owner = nfs4_get_stateowner(so); 4265 } 4266 } 4267 4268 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 4269 { 4270 struct nfs4_stateowner *so = cstate->replay_owner; 4271 4272 if (so != NULL) { 4273 cstate->replay_owner = NULL; 4274 mutex_unlock(&so->so_replay.rp_mutex); 4275 nfs4_put_stateowner(so); 4276 } 4277 } 4278 4279 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 4280 { 4281 struct nfs4_stateowner *sop; 4282 4283 sop = kmem_cache_alloc(slab, GFP_KERNEL); 4284 if (!sop) 4285 return NULL; 4286 4287 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); 4288 if (!sop->so_owner.data) { 4289 kmem_cache_free(slab, sop); 4290 return NULL; 4291 } 4292 4293 INIT_LIST_HEAD(&sop->so_stateids); 4294 sop->so_client = clp; 4295 init_nfs4_replay(&sop->so_replay); 4296 atomic_set(&sop->so_count, 1); 4297 return sop; 4298 } 4299 4300 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 4301 { 4302 lockdep_assert_held(&clp->cl_lock); 4303 4304 list_add(&oo->oo_owner.so_strhash, 4305 &clp->cl_ownerstr_hashtbl[strhashval]); 4306 list_add(&oo->oo_perclient, &clp->cl_openowners); 4307 } 4308 4309 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 4310 { 4311 unhash_openowner_locked(openowner(so)); 4312 } 4313 4314 static void nfs4_free_openowner(struct nfs4_stateowner *so) 4315 { 4316 struct nfs4_openowner *oo = openowner(so); 4317 4318 kmem_cache_free(openowner_slab, oo); 4319 } 4320 4321 static const struct nfs4_stateowner_operations openowner_ops = { 4322 .so_unhash = nfs4_unhash_openowner, 4323 .so_free = nfs4_free_openowner, 4324 }; 4325 4326 static struct nfs4_ol_stateid * 4327 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4328 { 4329 struct nfs4_ol_stateid *local, *ret = NULL; 4330 struct nfs4_openowner *oo = open->op_openowner; 4331 4332 lockdep_assert_held(&fp->fi_lock); 4333 4334 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 4335 /* ignore lock owners */ 4336 
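		/*
		 * (fi_stateids holds lock stateids as well as open
		 * stateids; so_is_open_owner tells the two apart.)
		 */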
if (local->st_stateowner->so_is_open_owner == 0) 4337 continue; 4338 if (local->st_stateowner != &oo->oo_owner) 4339 continue; 4340 if (local->st_stid.sc_type == NFS4_OPEN_STID) { 4341 ret = local; 4342 refcount_inc(&ret->st_stid.sc_count); 4343 break; 4344 } 4345 } 4346 return ret; 4347 } 4348 4349 static __be32 4350 nfsd4_verify_open_stid(struct nfs4_stid *s) 4351 { 4352 __be32 ret = nfs_ok; 4353 4354 switch (s->sc_type) { 4355 default: 4356 break; 4357 case 0: 4358 case NFS4_CLOSED_STID: 4359 case NFS4_CLOSED_DELEG_STID: 4360 ret = nfserr_bad_stateid; 4361 break; 4362 case NFS4_REVOKED_DELEG_STID: 4363 ret = nfserr_deleg_revoked; 4364 } 4365 return ret; 4366 } 4367 4368 /* Lock the stateid st_mutex, and deal with races with CLOSE */ 4369 static __be32 4370 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) 4371 { 4372 __be32 ret; 4373 4374 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); 4375 ret = nfsd4_verify_open_stid(&stp->st_stid); 4376 if (ret != nfs_ok) 4377 mutex_unlock(&stp->st_mutex); 4378 return ret; 4379 } 4380 4381 static struct nfs4_ol_stateid * 4382 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4383 { 4384 struct nfs4_ol_stateid *stp; 4385 for (;;) { 4386 spin_lock(&fp->fi_lock); 4387 stp = nfsd4_find_existing_open(fp, open); 4388 spin_unlock(&fp->fi_lock); 4389 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) 4390 break; 4391 nfs4_put_stid(&stp->st_stid); 4392 } 4393 return stp; 4394 } 4395 4396 static struct nfs4_openowner * 4397 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 4398 struct nfsd4_compound_state *cstate) 4399 { 4400 struct nfs4_client *clp = cstate->clp; 4401 struct nfs4_openowner *oo, *ret; 4402 4403 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 4404 if (!oo) 4405 return NULL; 4406 oo->oo_owner.so_ops = &openowner_ops; 4407 oo->oo_owner.so_is_open_owner = 1; 4408 oo->oo_owner.so_seqid = open->op_seqid; 4409 oo->oo_flags = 0; 4410 if (nfsd4_has_session(cstate)) 4411 oo->oo_flags |= NFS4_OO_CONFIRMED; 4412 oo->oo_time = 0; 4413 oo->oo_last_closed_stid = NULL; 4414 INIT_LIST_HEAD(&oo->oo_close_lru); 4415 spin_lock(&clp->cl_lock); 4416 ret = find_openstateowner_str_locked(strhashval, open, clp); 4417 if (ret == NULL) { 4418 hash_openowner(oo, clp, strhashval); 4419 ret = oo; 4420 } else 4421 nfs4_free_stateowner(&oo->oo_owner); 4422 4423 spin_unlock(&clp->cl_lock); 4424 return ret; 4425 } 4426 4427 static struct nfs4_ol_stateid * 4428 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) 4429 { 4430 4431 struct nfs4_openowner *oo = open->op_openowner; 4432 struct nfs4_ol_stateid *retstp = NULL; 4433 struct nfs4_ol_stateid *stp; 4434 4435 stp = open->op_stp; 4436 /* We are moving these outside of the spinlocks to avoid the warnings */ 4437 mutex_init(&stp->st_mutex); 4438 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 4439 4440 retry: 4441 spin_lock(&oo->oo_owner.so_client->cl_lock); 4442 spin_lock(&fp->fi_lock); 4443 4444 retstp = nfsd4_find_existing_open(fp, open); 4445 if (retstp) 4446 goto out_unlock; 4447 4448 open->op_stp = NULL; 4449 refcount_inc(&stp->st_stid.sc_count); 4450 stp->st_stid.sc_type = NFS4_OPEN_STID; 4451 INIT_LIST_HEAD(&stp->st_locks); 4452 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 4453 get_nfs4_file(fp); 4454 stp->st_stid.sc_file = fp; 4455 stp->st_access_bmap = 0; 4456 stp->st_deny_bmap = 0; 4457 stp->st_openstp = NULL; 4458 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 4459 list_add(&stp->st_perfile, 
&fp->fi_stateids); 4460 4461 out_unlock: 4462 spin_unlock(&fp->fi_lock); 4463 spin_unlock(&oo->oo_owner.so_client->cl_lock); 4464 if (retstp) { 4465 /* Handle races with CLOSE */ 4466 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 4467 nfs4_put_stid(&retstp->st_stid); 4468 goto retry; 4469 } 4470 /* To keep mutex tracking happy */ 4471 mutex_unlock(&stp->st_mutex); 4472 stp = retstp; 4473 } 4474 return stp; 4475 } 4476 4477 /* 4478 * In the 4.0 case we need to keep the owners around a little while to handle 4479 * CLOSE replay. We still do need to release any file access that is held by 4480 * them before returning however. 4481 */ 4482 static void 4483 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 4484 { 4485 struct nfs4_ol_stateid *last; 4486 struct nfs4_openowner *oo = openowner(s->st_stateowner); 4487 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 4488 nfsd_net_id); 4489 4490 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 4491 4492 /* 4493 * We know that we hold one reference via nfsd4_close, and another 4494 * "persistent" reference for the client. If the refcount is higher 4495 * than 2, then there are still calls in progress that are using this 4496 * stateid. We can't put the sc_file reference until they are finished. 4497 * Wait for the refcount to drop to 2. Since it has been unhashed, 4498 * there should be no danger of the refcount going back up again at 4499 * this point. 4500 */ 4501 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); 4502 4503 release_all_access(s); 4504 if (s->st_stid.sc_file) { 4505 put_nfs4_file(s->st_stid.sc_file); 4506 s->st_stid.sc_file = NULL; 4507 } 4508 4509 spin_lock(&nn->client_lock); 4510 last = oo->oo_last_closed_stid; 4511 oo->oo_last_closed_stid = s; 4512 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 4513 oo->oo_time = ktime_get_boottime_seconds(); 4514 spin_unlock(&nn->client_lock); 4515 if (last) 4516 nfs4_put_stid(&last->st_stid); 4517 } 4518 4519 /* search file_hashtbl[] for file */ 4520 static struct nfs4_file * 4521 find_file_locked(struct svc_fh *fh, unsigned int hashval) 4522 { 4523 struct nfs4_file *fp; 4524 4525 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash, 4526 lockdep_is_held(&state_lock)) { 4527 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) { 4528 if (refcount_inc_not_zero(&fp->fi_ref)) 4529 return fp; 4530 } 4531 } 4532 return NULL; 4533 } 4534 4535 static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh, 4536 unsigned int hashval) 4537 { 4538 struct nfs4_file *fp; 4539 struct nfs4_file *ret = NULL; 4540 bool alias_found = false; 4541 4542 spin_lock(&state_lock); 4543 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash, 4544 lockdep_is_held(&state_lock)) { 4545 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) { 4546 if (refcount_inc_not_zero(&fp->fi_ref)) 4547 ret = fp; 4548 } else if (d_inode(fh->fh_dentry) == fp->fi_inode) 4549 fp->fi_aliased = alias_found = true; 4550 } 4551 if (likely(ret == NULL)) { 4552 nfsd4_init_file(fh, hashval, new); 4553 new->fi_aliased = alias_found; 4554 ret = new; 4555 } 4556 spin_unlock(&state_lock); 4557 return ret; 4558 } 4559 4560 static struct nfs4_file * find_file(struct svc_fh *fh) 4561 { 4562 struct nfs4_file *fp; 4563 unsigned int hashval = file_hashval(fh); 4564 4565 rcu_read_lock(); 4566 fp = find_file_locked(fh, hashval); 4567 rcu_read_unlock(); 4568 return fp; 4569 } 4570 4571 static struct nfs4_file * 4572 find_or_add_file(struct nfs4_file *new, struct svc_fh *fh) 4573 { 4574 
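	/*
	 * Try a lockless RCU lookup first; on a miss, insert_file()
	 * re-checks under state_lock before hashing @new, so two
	 * concurrent opens can't both insert the same file.
	 */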
struct nfs4_file *fp; 4575 unsigned int hashval = file_hashval(fh); 4576 4577 rcu_read_lock(); 4578 fp = find_file_locked(fh, hashval); 4579 rcu_read_unlock(); 4580 if (fp) 4581 return fp; 4582 4583 return insert_file(new, fh, hashval); 4584 } 4585 4586 /* 4587 * Called to check deny when READ with all zero stateid or 4588 * WRITE with all zero or all one stateid 4589 */ 4590 static __be32 4591 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 4592 { 4593 struct nfs4_file *fp; 4594 __be32 ret = nfs_ok; 4595 4596 fp = find_file(current_fh); 4597 if (!fp) 4598 return ret; 4599 /* Check for conflicting share reservations */ 4600 spin_lock(&fp->fi_lock); 4601 if (fp->fi_share_deny & deny_type) 4602 ret = nfserr_locked; 4603 spin_unlock(&fp->fi_lock); 4604 put_nfs4_file(fp); 4605 return ret; 4606 } 4607 4608 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 4609 { 4610 struct nfs4_delegation *dp = cb_to_delegation(cb); 4611 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 4612 nfsd_net_id); 4613 4614 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 4615 4616 /* 4617 * We can't do this in nfsd_break_deleg_cb because it is 4618 * already holding inode->i_lock. 4619 * 4620 * If the dl_time != 0, then we know that it has already been 4621 * queued for a lease break. Don't queue it again. 4622 */ 4623 spin_lock(&state_lock); 4624 if (delegation_hashed(dp) && dp->dl_time == 0) { 4625 dp->dl_time = ktime_get_boottime_seconds(); 4626 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 4627 } 4628 spin_unlock(&state_lock); 4629 } 4630 4631 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 4632 struct rpc_task *task) 4633 { 4634 struct nfs4_delegation *dp = cb_to_delegation(cb); 4635 4636 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID || 4637 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) 4638 return 1; 4639 4640 switch (task->tk_status) { 4641 case 0: 4642 return 1; 4643 case -NFS4ERR_DELAY: 4644 rpc_delay(task, 2 * HZ); 4645 return 0; 4646 case -EBADHANDLE: 4647 case -NFS4ERR_BAD_STATEID: 4648 /* 4649 * Race: client probably got cb_recall before open reply 4650 * granting delegation. 4651 */ 4652 if (dp->dl_retries--) { 4653 rpc_delay(task, 2 * HZ); 4654 return 0; 4655 } 4656 fallthrough; 4657 default: 4658 return 1; 4659 } 4660 } 4661 4662 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 4663 { 4664 struct nfs4_delegation *dp = cb_to_delegation(cb); 4665 4666 nfs4_put_stid(&dp->dl_stid); 4667 } 4668 4669 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 4670 .prepare = nfsd4_cb_recall_prepare, 4671 .done = nfsd4_cb_recall_done, 4672 .release = nfsd4_cb_recall_release, 4673 }; 4674 4675 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 4676 { 4677 /* 4678 * We're assuming the state code never drops its reference 4679 * without first removing the lease. Since we're in this lease 4680 * callback (and since the lease code is serialized by the 4681 * i_lock) we know the server hasn't removed the lease yet, and 4682 * we know it's safe to take a reference. 4683 */ 4684 refcount_inc(&dp->dl_stid.sc_count); 4685 nfsd4_run_cb(&dp->dl_recall); 4686 } 4687 4688 /* Called from break_lease() with i_lock held. 
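 * The lease's fl_owner is the nfs4_delegation itself, which is how we
 * get from the file_lock back to the delegation being recalled.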
 */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	trace_nfsd_cb_recall(&dp->dl_stid);

	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
	struct nfs4_delegation *dl = fl->fl_owner;
	struct svc_rqst *rqst;
	struct nfs4_client *clp;

	if (!i_am_nfsd())
		return false;
	rqst = kthread_data(current);
	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
		return false;
	clp = *(rqst->rq_lease_breaker);
	return dl->dl_stid.sc_client == clp;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
				struct nfsd_net *nn)
{
	struct nfs4_client *found;

	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (found)
		atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);
	return found;
}

static __be32 set_client(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	if (cstate->clp) {
		if (!same_clid(&cstate->clp->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}
	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	/*
	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
	 * set cstate->clp), so sessions = false:
	 */
	cstate->clp = lookup_clientid(clid, false, nn);
	if (!cstate->clp)
		return nfserr_expired;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	/*
	 * Allocate this now, in case we need it later: once we've
	 * already created the file we don't want to risk a further
	 * failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = set_client(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval =
ownerstr_hashval(&open->op_owner); 4813 oo = find_openstateowner_str(strhashval, open, clp); 4814 open->op_openowner = oo; 4815 if (!oo) { 4816 goto new_owner; 4817 } 4818 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 4819 /* Replace unconfirmed owners without checking for replay. */ 4820 release_openowner(oo); 4821 open->op_openowner = NULL; 4822 goto new_owner; 4823 } 4824 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 4825 if (status) 4826 return status; 4827 goto alloc_stateid; 4828 new_owner: 4829 oo = alloc_init_open_stateowner(strhashval, open, cstate); 4830 if (oo == NULL) 4831 return nfserr_jukebox; 4832 open->op_openowner = oo; 4833 alloc_stateid: 4834 open->op_stp = nfs4_alloc_open_stateid(clp); 4835 if (!open->op_stp) 4836 return nfserr_jukebox; 4837 4838 if (nfsd4_has_session(cstate) && 4839 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 4840 open->op_odstate = alloc_clnt_odstate(clp); 4841 if (!open->op_odstate) 4842 return nfserr_jukebox; 4843 } 4844 4845 return nfs_ok; 4846 } 4847 4848 static inline __be32 4849 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 4850 { 4851 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 4852 return nfserr_openmode; 4853 else 4854 return nfs_ok; 4855 } 4856 4857 static int share_access_to_flags(u32 share_access) 4858 { 4859 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 4860 } 4861 4862 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 4863 { 4864 struct nfs4_stid *ret; 4865 4866 ret = find_stateid_by_type(cl, s, 4867 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); 4868 if (!ret) 4869 return NULL; 4870 return delegstateid(ret); 4871 } 4872 4873 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 4874 { 4875 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 4876 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 4877 } 4878 4879 static __be32 4880 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 4881 struct nfs4_delegation **dp) 4882 { 4883 int flags; 4884 __be32 status = nfserr_bad_stateid; 4885 struct nfs4_delegation *deleg; 4886 4887 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 4888 if (deleg == NULL) 4889 goto out; 4890 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { 4891 nfs4_put_stid(&deleg->dl_stid); 4892 if (cl->cl_minorversion) 4893 status = nfserr_deleg_revoked; 4894 goto out; 4895 } 4896 flags = share_access_to_flags(open->op_share_access); 4897 status = nfs4_check_delegmode(deleg, flags); 4898 if (status) { 4899 nfs4_put_stid(&deleg->dl_stid); 4900 goto out; 4901 } 4902 *dp = deleg; 4903 out: 4904 if (!nfsd4_is_deleg_cur(open)) 4905 return nfs_ok; 4906 if (status) 4907 return status; 4908 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 4909 return nfs_ok; 4910 } 4911 4912 static inline int nfs4_access_to_access(u32 nfs4_access) 4913 { 4914 int flags = 0; 4915 4916 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 4917 flags |= NFSD_MAY_READ; 4918 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 4919 flags |= NFSD_MAY_WRITE; 4920 return flags; 4921 } 4922 4923 static inline __be32 4924 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 4925 struct nfsd4_open *open) 4926 { 4927 struct iattr iattr = { 4928 .ia_valid = ATTR_SIZE, 4929 .ia_size = 0, 4930 }; 4931 if (!open->op_truncate) 4932 return 0; 4933 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 4934 return nfserr_inval; 4935 return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0); 4936 } 4937 4938 static 
__be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 4939 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 4940 struct nfsd4_open *open) 4941 { 4942 struct nfsd_file *nf = NULL; 4943 __be32 status; 4944 int oflag = nfs4_access_to_omode(open->op_share_access); 4945 int access = nfs4_access_to_access(open->op_share_access); 4946 unsigned char old_access_bmap, old_deny_bmap; 4947 4948 spin_lock(&fp->fi_lock); 4949 4950 /* 4951 * Are we trying to set a deny mode that would conflict with 4952 * current access? 4953 */ 4954 status = nfs4_file_check_deny(fp, open->op_share_deny); 4955 if (status != nfs_ok) { 4956 spin_unlock(&fp->fi_lock); 4957 goto out; 4958 } 4959 4960 /* set access to the file */ 4961 status = nfs4_file_get_access(fp, open->op_share_access); 4962 if (status != nfs_ok) { 4963 spin_unlock(&fp->fi_lock); 4964 goto out; 4965 } 4966 4967 /* Set access bits in stateid */ 4968 old_access_bmap = stp->st_access_bmap; 4969 set_access(open->op_share_access, stp); 4970 4971 /* Set new deny mask */ 4972 old_deny_bmap = stp->st_deny_bmap; 4973 set_deny(open->op_share_deny, stp); 4974 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 4975 4976 if (!fp->fi_fds[oflag]) { 4977 spin_unlock(&fp->fi_lock); 4978 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf); 4979 if (status) 4980 goto out_put_access; 4981 spin_lock(&fp->fi_lock); 4982 if (!fp->fi_fds[oflag]) { 4983 fp->fi_fds[oflag] = nf; 4984 nf = NULL; 4985 } 4986 } 4987 spin_unlock(&fp->fi_lock); 4988 if (nf) 4989 nfsd_file_put(nf); 4990 4991 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, 4992 access)); 4993 if (status) 4994 goto out_put_access; 4995 4996 status = nfsd4_truncate(rqstp, cur_fh, open); 4997 if (status) 4998 goto out_put_access; 4999 out: 5000 return status; 5001 out_put_access: 5002 stp->st_access_bmap = old_access_bmap; 5003 nfs4_file_put_access(fp, open->op_share_access); 5004 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 5005 goto out; 5006 } 5007 5008 static __be32 5009 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 5010 { 5011 __be32 status; 5012 unsigned char old_deny_bmap = stp->st_deny_bmap; 5013 5014 if (!test_access(open->op_share_access, stp)) 5015 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open); 5016 5017 /* test and set deny mode */ 5018 spin_lock(&fp->fi_lock); 5019 status = nfs4_file_check_deny(fp, open->op_share_deny); 5020 if (status == nfs_ok) { 5021 set_deny(open->op_share_deny, stp); 5022 fp->fi_share_deny |= 5023 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5024 } 5025 spin_unlock(&fp->fi_lock); 5026 5027 if (status != nfs_ok) 5028 return status; 5029 5030 status = nfsd4_truncate(rqstp, cur_fh, open); 5031 if (status != nfs_ok) 5032 reset_union_bmap_deny(old_deny_bmap, stp); 5033 return status; 5034 } 5035 5036 /* Should we give out recallable state?: */ 5037 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 5038 { 5039 if (clp->cl_cb_state == NFSD4_CB_UP) 5040 return true; 5041 /* 5042 * In the sessions case, since we don't have to establish a 5043 * separate connection for callbacks, we assume it's OK 5044 * until we hear otherwise: 5045 */ 5046 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 5047 } 5048 5049 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, 5050 int flag) 5051 { 5052 struct file_lock *fl; 5053 5054 fl = locks_alloc_lock(); 5055 if (!fl) 5056 return 
NULL; 5057 fl->fl_lmops = &nfsd_lease_mng_ops; 5058 fl->fl_flags = FL_DELEG; 5059 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 5060 fl->fl_end = OFFSET_MAX; 5061 fl->fl_owner = (fl_owner_t)dp; 5062 fl->fl_pid = current->tgid; 5063 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; 5064 return fl; 5065 } 5066 5067 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, 5068 struct nfs4_file *fp) 5069 { 5070 struct nfs4_ol_stateid *st; 5071 struct file *f = fp->fi_deleg_file->nf_file; 5072 struct inode *ino = locks_inode(f); 5073 int writes; 5074 5075 writes = atomic_read(&ino->i_writecount); 5076 if (!writes) 5077 return 0; 5078 /* 5079 * There could be multiple filehandles (hence multiple 5080 * nfs4_files) referencing this file, but that's not too 5081 * common; let's just give up in that case rather than 5082 * trying to go look up all the clients using that other 5083 * nfs4_file as well: 5084 */ 5085 if (fp->fi_aliased) 5086 return -EAGAIN; 5087 /* 5088 * If there's a close in progress, make sure that we see it 5089 * clear any fi_fds[] entries before we see it decrement 5090 * i_writecount: 5091 */ 5092 smp_mb__after_atomic(); 5093 5094 if (fp->fi_fds[O_WRONLY]) 5095 writes--; 5096 if (fp->fi_fds[O_RDWR]) 5097 writes--; 5098 if (writes > 0) 5099 return -EAGAIN; /* There may be non-NFSv4 writers */ 5100 /* 5101 * It's possible there are non-NFSv4 write opens in progress, 5102 * but if they haven't incremented i_writecount yet then they 5103 * also haven't called break lease yet; so, they'll break this 5104 * lease soon enough. So, all that's left to check for is NFSv4 5105 * opens: 5106 */ 5107 spin_lock(&fp->fi_lock); 5108 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { 5109 if (st->st_openstp == NULL /* it's an open */ && 5110 access_permit_write(st) && 5111 st->st_stid.sc_client != clp) { 5112 spin_unlock(&fp->fi_lock); 5113 return -EAGAIN; 5114 } 5115 } 5116 spin_unlock(&fp->fi_lock); 5117 /* 5118 * There's a small chance that we could be racing with another 5119 * NFSv4 open. However, any open that hasn't added itself to 5120 * the fi_stateids list also hasn't called break_lease yet; so, 5121 * they'll break this lease soon enough. 5122 */ 5123 return 0; 5124 } 5125 5126 static struct nfs4_delegation * 5127 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, 5128 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) 5129 { 5130 int status = 0; 5131 struct nfs4_delegation *dp; 5132 struct nfsd_file *nf; 5133 struct file_lock *fl; 5134 5135 /* 5136 * The fi_had_conflict and nfs_get_existing_delegation checks 5137 * here are just optimizations; we'll need to recheck them at 5138 * the end: 5139 */ 5140 if (fp->fi_had_conflict) 5141 return ERR_PTR(-EAGAIN); 5142 5143 nf = find_readable_file(fp); 5144 if (!nf) { 5145 /* 5146 * We probably could attempt another open and get a read 5147 * delegation, but for now, don't bother until the 5148 * client actually sends us one. 
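		 * (The delegation lease is installed on fi_deleg_file
		 * below, so without a readable nfsd_file there would be
		 * nothing to attach it to.)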
5149 */ 5150 return ERR_PTR(-EAGAIN); 5151 } 5152 spin_lock(&state_lock); 5153 spin_lock(&fp->fi_lock); 5154 if (nfs4_delegation_exists(clp, fp)) 5155 status = -EAGAIN; 5156 else if (!fp->fi_deleg_file) { 5157 fp->fi_deleg_file = nf; 5158 /* increment early to prevent fi_deleg_file from being 5159 * cleared */ 5160 fp->fi_delegees = 1; 5161 nf = NULL; 5162 } else 5163 fp->fi_delegees++; 5164 spin_unlock(&fp->fi_lock); 5165 spin_unlock(&state_lock); 5166 if (nf) 5167 nfsd_file_put(nf); 5168 if (status) 5169 return ERR_PTR(status); 5170 5171 status = -ENOMEM; 5172 dp = alloc_init_deleg(clp, fp, fh, odstate); 5173 if (!dp) 5174 goto out_delegees; 5175 5176 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ); 5177 if (!fl) 5178 goto out_clnt_odstate; 5179 5180 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); 5181 if (fl) 5182 locks_free_lock(fl); 5183 if (status) 5184 goto out_clnt_odstate; 5185 status = nfsd4_check_conflicting_opens(clp, fp); 5186 if (status) 5187 goto out_unlock; 5188 5189 spin_lock(&state_lock); 5190 spin_lock(&fp->fi_lock); 5191 if (fp->fi_had_conflict) 5192 status = -EAGAIN; 5193 else 5194 status = hash_delegation_locked(dp, fp); 5195 spin_unlock(&fp->fi_lock); 5196 spin_unlock(&state_lock); 5197 5198 if (status) 5199 goto out_unlock; 5200 5201 return dp; 5202 out_unlock: 5203 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); 5204 out_clnt_odstate: 5205 put_clnt_odstate(dp->dl_clnt_odstate); 5206 nfs4_put_stid(&dp->dl_stid); 5207 out_delegees: 5208 put_deleg_file(fp); 5209 return ERR_PTR(status); 5210 } 5211 5212 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 5213 { 5214 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5215 if (status == -EAGAIN) 5216 open->op_why_no_deleg = WND4_CONTENTION; 5217 else { 5218 open->op_why_no_deleg = WND4_RESOURCE; 5219 switch (open->op_deleg_want) { 5220 case NFS4_SHARE_WANT_READ_DELEG: 5221 case NFS4_SHARE_WANT_WRITE_DELEG: 5222 case NFS4_SHARE_WANT_ANY_DELEG: 5223 break; 5224 case NFS4_SHARE_WANT_CANCEL: 5225 open->op_why_no_deleg = WND4_CANCELLED; 5226 break; 5227 case NFS4_SHARE_WANT_NO_DELEG: 5228 WARN_ON_ONCE(1); 5229 } 5230 } 5231 } 5232 5233 /* 5234 * Attempt to hand out a delegation. 5235 * 5236 * Note we don't support write delegations, and won't until the vfs has 5237 * proper support for them. 
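 * (That is also why nfs4_set_delegation() always builds an F_RDLCK
 * lease, via nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ).)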
5238 */ 5239 static void 5240 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, 5241 struct nfs4_ol_stateid *stp) 5242 { 5243 struct nfs4_delegation *dp; 5244 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 5245 struct nfs4_client *clp = stp->st_stid.sc_client; 5246 int cb_up; 5247 int status = 0; 5248 5249 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 5250 open->op_recall = 0; 5251 switch (open->op_claim_type) { 5252 case NFS4_OPEN_CLAIM_PREVIOUS: 5253 if (!cb_up) 5254 open->op_recall = 1; 5255 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ) 5256 goto out_no_deleg; 5257 break; 5258 case NFS4_OPEN_CLAIM_NULL: 5259 case NFS4_OPEN_CLAIM_FH: 5260 /* 5261 * Let's not give out any delegations till everyone's 5262 * had the chance to reclaim theirs, *and* until 5263 * NLM locks have all been reclaimed: 5264 */ 5265 if (locks_in_grace(clp->net)) 5266 goto out_no_deleg; 5267 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 5268 goto out_no_deleg; 5269 break; 5270 default: 5271 goto out_no_deleg; 5272 } 5273 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); 5274 if (IS_ERR(dp)) 5275 goto out_no_deleg; 5276 5277 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 5278 5279 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); 5280 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 5281 nfs4_put_stid(&dp->dl_stid); 5282 return; 5283 out_no_deleg: 5284 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 5285 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 5286 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { 5287 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 5288 open->op_recall = 1; 5289 } 5290 5291 /* 4.1 client asking for a delegation? */ 5292 if (open->op_deleg_want) 5293 nfsd4_open_deleg_none_ext(open, status); 5294 return; 5295 } 5296 5297 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 5298 struct nfs4_delegation *dp) 5299 { 5300 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 5301 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5302 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5303 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 5304 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 5305 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5306 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5307 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 5308 } 5309 /* Otherwise the client must be confused wanting a delegation 5310 * it already has, therefore we don't return 5311 * NFS4_OPEN_DELEGATE_NONE_EXT and reason. 5312 */ 5313 } 5314 5315 __be32 5316 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 5317 { 5318 struct nfsd4_compoundres *resp = rqstp->rq_resp; 5319 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 5320 struct nfs4_file *fp = NULL; 5321 struct nfs4_ol_stateid *stp = NULL; 5322 struct nfs4_delegation *dp = NULL; 5323 __be32 status; 5324 bool new_stp = false; 5325 5326 /* 5327 * Lookup file; if found, lookup stateid and check open request, 5328 * and check for delegations in the process of being recalled. 
5329 * If not found, create the nfs4_file struct 5330 */ 5331 fp = find_or_add_file(open->op_file, current_fh); 5332 if (fp != open->op_file) { 5333 status = nfs4_check_deleg(cl, open, &dp); 5334 if (status) 5335 goto out; 5336 stp = nfsd4_find_and_lock_existing_open(fp, open); 5337 } else { 5338 open->op_file = NULL; 5339 status = nfserr_bad_stateid; 5340 if (nfsd4_is_deleg_cur(open)) 5341 goto out; 5342 } 5343 5344 if (!stp) { 5345 stp = init_open_stateid(fp, open); 5346 if (!open->op_stp) 5347 new_stp = true; 5348 } 5349 5350 /* 5351 * OPEN the file, or upgrade an existing OPEN. 5352 * If truncate fails, the OPEN fails. 5353 * 5354 * stp is already locked. 5355 */ 5356 if (!new_stp) { 5357 /* Stateid was found, this is an OPEN upgrade */ 5358 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 5359 if (status) { 5360 mutex_unlock(&stp->st_mutex); 5361 goto out; 5362 } 5363 } else { 5364 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); 5365 if (status) { 5366 stp->st_stid.sc_type = NFS4_CLOSED_STID; 5367 release_open_stateid(stp); 5368 mutex_unlock(&stp->st_mutex); 5369 goto out; 5370 } 5371 5372 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 5373 open->op_odstate); 5374 if (stp->st_clnt_odstate == open->op_odstate) 5375 open->op_odstate = NULL; 5376 } 5377 5378 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 5379 mutex_unlock(&stp->st_mutex); 5380 5381 if (nfsd4_has_session(&resp->cstate)) { 5382 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 5383 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5384 open->op_why_no_deleg = WND4_NOT_WANTED; 5385 goto nodeleg; 5386 } 5387 } 5388 5389 /* 5390 * Attempt to hand out a delegation. No error return, because the 5391 * OPEN succeeds even if we fail. 5392 */ 5393 nfs4_open_delegation(current_fh, open, stp); 5394 nodeleg: 5395 status = nfs_ok; 5396 trace_nfsd_open(&stp->st_stid.sc_stateid); 5397 out: 5398 /* 4.1 client trying to upgrade/downgrade delegation? */ 5399 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 5400 open->op_deleg_want) 5401 nfsd4_deleg_xgrade_none_ext(open, dp); 5402 5403 if (fp) 5404 put_nfs4_file(fp); 5405 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 5406 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 5407 /* 5408 * To finish the open response, we just need to set the rflags. 
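	 * (LOCKTYPE_POSIX is always set; CONFIRM only when a 4.0
	 * openowner still needs an OPEN_CONFIRM, and MAY_NOTIFY_LOCK
	 * only on a 4.1+ session.)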
5409 */ 5410 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 5411 if (nfsd4_has_session(&resp->cstate)) 5412 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; 5413 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) 5414 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 5415 5416 if (dp) 5417 nfs4_put_stid(&dp->dl_stid); 5418 if (stp) 5419 nfs4_put_stid(&stp->st_stid); 5420 5421 return status; 5422 } 5423 5424 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 5425 struct nfsd4_open *open) 5426 { 5427 if (open->op_openowner) { 5428 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; 5429 5430 nfsd4_cstate_assign_replay(cstate, so); 5431 nfs4_put_stateowner(so); 5432 } 5433 if (open->op_file) 5434 kmem_cache_free(file_slab, open->op_file); 5435 if (open->op_stp) 5436 nfs4_put_stid(&open->op_stp->st_stid); 5437 if (open->op_odstate) 5438 kmem_cache_free(odstate_slab, open->op_odstate); 5439 } 5440 5441 __be32 5442 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5443 union nfsd4_op_u *u) 5444 { 5445 clientid_t *clid = &u->renew; 5446 struct nfs4_client *clp; 5447 __be32 status; 5448 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5449 5450 trace_nfsd_clid_renew(clid); 5451 status = set_client(clid, cstate, nn); 5452 if (status) 5453 return status; 5454 clp = cstate->clp; 5455 if (!list_empty(&clp->cl_delegations) 5456 && clp->cl_cb_state != NFSD4_CB_UP) 5457 return nfserr_cb_path_down; 5458 return nfs_ok; 5459 } 5460 5461 void 5462 nfsd4_end_grace(struct nfsd_net *nn) 5463 { 5464 /* do nothing if grace period already ended */ 5465 if (nn->grace_ended) 5466 return; 5467 5468 trace_nfsd_grace_complete(nn); 5469 nn->grace_ended = true; 5470 /* 5471 * If the server goes down again right now, an NFSv4 5472 * client will still be allowed to reclaim after it comes back up, 5473 * even if it hasn't yet had a chance to reclaim state this time. 5474 * 5475 */ 5476 nfsd4_record_grace_done(nn); 5477 /* 5478 * At this point, NFSv4 clients can still reclaim. But if the 5479 * server crashes, any that have not yet reclaimed will be out 5480 * of luck on the next boot. 5481 * 5482 * (NFSv4.1+ clients are considered to have reclaimed once they 5483 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to 5484 * have reclaimed after their first OPEN.) 5485 */ 5486 locks_end_grace(&nn->nfsd4_manager); 5487 /* 5488 * At this point, and once lockd and/or any other containers 5489 * exit their grace period, further reclaims will fail and 5490 * regular locking can resume. 5491 */ 5492 } 5493 5494 /* 5495 * If we've waited a lease period but there are still clients trying to 5496 * reclaim, wait a little longer to give them a chance to finish. 
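 * "A little longer" is bounded: once two full lease periods have
 * passed since boot, we stop waiting and end the grace period anyway.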
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
	time64_t double_grace_period_end = nn->boot_time +
					   2 * nn->nfsd4_lease;

	if (nn->track_reclaim_completes &&
			atomic_read(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size)
		return false;
	if (!nn->somebody_reclaimed)
		return false;
	nn->somebody_reclaimed = false;
	/*
	 * If we've given them *two* lease times to reclaim, and they're
	 * still not done, give up:
	 */
	if (ktime_get_boottime_seconds() > double_grace_period_end)
		return false;
	return true;
}

struct laundry_time {
	time64_t cutoff;
	time64_t new_timeo;
};

static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
{
	time64_t time_remaining;

	if (last_refresh < lt->cutoff)
		return true;
	time_remaining = last_refresh - lt->cutoff;
	lt->new_timeo = min(lt->new_timeo, time_remaining);
	return false;
}

#ifdef CONFIG_NFSD_V4_2_INTER_SSC
void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
{
	spin_lock_init(&nn->nfsd_ssc_lock);
	INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
	init_waitqueue_head(&nn->nfsd_ssc_waitq);
}
EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);

/*
 * This is called when nfsd is being shut down, after all inter_ssc
 * cleanup is done, to destroy the ssc delayed unmount list.
 */
static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
{
	struct nfsd4_ssc_umount_item *ni = NULL;
	struct nfsd4_ssc_umount_item *tmp;

	spin_lock(&nn->nfsd_ssc_lock);
	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
		list_del(&ni->nsui_list);
		spin_unlock(&nn->nfsd_ssc_lock);
		mntput(ni->nsui_vfsmount);
		kfree(ni);
		spin_lock(&nn->nfsd_ssc_lock);
	}
	spin_unlock(&nn->nfsd_ssc_lock);
}

static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
{
	bool do_wakeup = false;
	struct nfsd4_ssc_umount_item *ni = NULL;
	struct nfsd4_ssc_umount_item *tmp;

	spin_lock(&nn->nfsd_ssc_lock);
	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
		if (time_after(jiffies, ni->nsui_expire)) {
			if (refcount_read(&ni->nsui_refcnt) > 1)
				continue;

			/* mark the item as being unmounted */
			ni->nsui_busy = true;
			spin_unlock(&nn->nfsd_ssc_lock);
			mntput(ni->nsui_vfsmount);
			spin_lock(&nn->nfsd_ssc_lock);

			/* waiters need to restart from the beginning of the list */
			list_del(&ni->nsui_list);
			kfree(ni);

			/* wake up ssc_connect waiters */
			do_wakeup = true;
			continue;
		}
		break;
	}
	if (do_wakeup)
		wake_up_all(&nn->nfsd_ssc_waitq);
	spin_unlock(&nn->nfsd_ssc_lock);
}
#endif

static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	struct laundry_time lt = {
		.cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
		.new_timeo = nn->nfsd4_lease
	};
	struct nfs4_cpntf_state *cps;
	copy_stateid_t *cps_t;
	int i;

	if (clients_still_reclaiming(nn)) {
		lt.new_timeo = 0;
		goto out;
	}
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);

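	/*
	 * Reap expired state in order: copy-notify stateids, whole
	 * clients, delegations on the recall LRU, 4.0 openowners held
	 * for CLOSE replay, and finally timed-out blocked locks.
	 */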
spin_lock(&nn->s2s_cp_lock); 5623 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { 5624 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); 5625 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID && 5626 state_expired(<, cps->cpntf_time)) 5627 _free_cpntf_state_locked(nn, cps); 5628 } 5629 spin_unlock(&nn->s2s_cp_lock); 5630 5631 spin_lock(&nn->client_lock); 5632 list_for_each_safe(pos, next, &nn->client_lru) { 5633 clp = list_entry(pos, struct nfs4_client, cl_lru); 5634 if (!state_expired(<, clp->cl_time)) 5635 break; 5636 if (mark_client_expired_locked(clp)) 5637 continue; 5638 list_add(&clp->cl_lru, &reaplist); 5639 } 5640 spin_unlock(&nn->client_lock); 5641 list_for_each_safe(pos, next, &reaplist) { 5642 clp = list_entry(pos, struct nfs4_client, cl_lru); 5643 trace_nfsd_clid_purged(&clp->cl_clientid); 5644 list_del_init(&clp->cl_lru); 5645 expire_client(clp); 5646 } 5647 spin_lock(&state_lock); 5648 list_for_each_safe(pos, next, &nn->del_recall_lru) { 5649 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5650 if (!state_expired(<, dp->dl_time)) 5651 break; 5652 WARN_ON(!unhash_delegation_locked(dp)); 5653 list_add(&dp->dl_recall_lru, &reaplist); 5654 } 5655 spin_unlock(&state_lock); 5656 while (!list_empty(&reaplist)) { 5657 dp = list_first_entry(&reaplist, struct nfs4_delegation, 5658 dl_recall_lru); 5659 list_del_init(&dp->dl_recall_lru); 5660 revoke_delegation(dp); 5661 } 5662 5663 spin_lock(&nn->client_lock); 5664 while (!list_empty(&nn->close_lru)) { 5665 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 5666 oo_close_lru); 5667 if (!state_expired(<, oo->oo_time)) 5668 break; 5669 list_del_init(&oo->oo_close_lru); 5670 stp = oo->oo_last_closed_stid; 5671 oo->oo_last_closed_stid = NULL; 5672 spin_unlock(&nn->client_lock); 5673 nfs4_put_stid(&stp->st_stid); 5674 spin_lock(&nn->client_lock); 5675 } 5676 spin_unlock(&nn->client_lock); 5677 5678 /* 5679 * It's possible for a client to try and acquire an already held lock 5680 * that is being held for a long time, and then lose interest in it. 5681 * So, we clean out any un-revisited request after a lease period 5682 * under the assumption that the client is no longer interested. 5683 * 5684 * RFC5661, sec. 9.6 states that the client must not rely on getting 5685 * notifications and must continue to poll for locks, even when the 5686 * server supports them. Thus this shouldn't lead to clients blocking 5687 * indefinitely once the lock does become free. 
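 * (Concretely: any blocked lock not revisited within a lease period,
 * per state_expired() below, is reaped.)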
5688 */ 5689 BUG_ON(!list_empty(&reaplist)); 5690 spin_lock(&nn->blocked_locks_lock); 5691 while (!list_empty(&nn->blocked_locks_lru)) { 5692 nbl = list_first_entry(&nn->blocked_locks_lru, 5693 struct nfsd4_blocked_lock, nbl_lru); 5694 if (!state_expired(<, nbl->nbl_time)) 5695 break; 5696 list_move(&nbl->nbl_lru, &reaplist); 5697 list_del_init(&nbl->nbl_list); 5698 } 5699 spin_unlock(&nn->blocked_locks_lock); 5700 5701 while (!list_empty(&reaplist)) { 5702 nbl = list_first_entry(&reaplist, 5703 struct nfsd4_blocked_lock, nbl_lru); 5704 list_del_init(&nbl->nbl_lru); 5705 free_blocked_lock(nbl); 5706 } 5707 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 5708 /* service the server-to-server copy delayed unmount list */ 5709 nfsd4_ssc_expire_umount(nn); 5710 #endif 5711 out: 5712 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 5713 } 5714 5715 static struct workqueue_struct *laundry_wq; 5716 static void laundromat_main(struct work_struct *); 5717 5718 static void 5719 laundromat_main(struct work_struct *laundry) 5720 { 5721 time64_t t; 5722 struct delayed_work *dwork = to_delayed_work(laundry); 5723 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 5724 laundromat_work); 5725 5726 t = nfs4_laundromat(nn); 5727 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 5728 } 5729 5730 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) 5731 { 5732 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) 5733 return nfserr_bad_stateid; 5734 return nfs_ok; 5735 } 5736 5737 static 5738 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 5739 { 5740 __be32 status = nfserr_openmode; 5741 5742 /* For lock stateid's, we test the parent open, not the lock: */ 5743 if (stp->st_openstp) 5744 stp = stp->st_openstp; 5745 if ((flags & WR_STATE) && !access_permit_write(stp)) 5746 goto out; 5747 if ((flags & RD_STATE) && !access_permit_read(stp)) 5748 goto out; 5749 status = nfs_ok; 5750 out: 5751 return status; 5752 } 5753 5754 static inline __be32 5755 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 5756 { 5757 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 5758 return nfs_ok; 5759 else if (opens_in_grace(net)) { 5760 /* Answer in remaining cases depends on existence of 5761 * conflicting state; so we must wait out the grace period. */ 5762 return nfserr_grace; 5763 } else if (flags & WR_STATE) 5764 return nfs4_share_conflict(current_fh, 5765 NFS4_SHARE_DENY_WRITE); 5766 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 5767 return nfs4_share_conflict(current_fh, 5768 NFS4_SHARE_DENY_READ); 5769 } 5770 5771 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 5772 { 5773 /* 5774 * When sessions are used the stateid generation number is ignored 5775 * when it is zero. 5776 */ 5777 if (has_session && in->si_generation == 0) 5778 return nfs_ok; 5779 5780 if (in->si_generation == ref->si_generation) 5781 return nfs_ok; 5782 5783 /* If the client sends us a stateid from the future, it's buggy: */ 5784 if (nfsd4_stateid_generation_after(in, ref)) 5785 return nfserr_bad_stateid; 5786 /* 5787 * However, we could see a stateid from the past, even from a 5788 * non-buggy client. For example, if the client sends a lock 5789 * while some IO is outstanding, the lock may bump si_generation 5790 * while the IO is still in flight. 
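	 * (For example, a READ sent with an open stateid at generation
	 * 3 can race with a LOCK that bumps it to 4; the READ then
	 * draws nfserr_old_stateid even though nothing is wrong.)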
The client could avoid that 5791 * situation by waiting for responses on all the IO requests, 5792 * but better performance may result in retrying IO that 5793 * receives an old_stateid error if requests are rarely 5794 * reordered in flight: 5795 */ 5796 return nfserr_old_stateid; 5797 } 5798 5799 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session) 5800 { 5801 __be32 ret; 5802 5803 spin_lock(&s->sc_lock); 5804 ret = nfsd4_verify_open_stid(s); 5805 if (ret == nfs_ok) 5806 ret = check_stateid_generation(in, &s->sc_stateid, has_session); 5807 spin_unlock(&s->sc_lock); 5808 return ret; 5809 } 5810 5811 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 5812 { 5813 if (ols->st_stateowner->so_is_open_owner && 5814 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 5815 return nfserr_bad_stateid; 5816 return nfs_ok; 5817 } 5818 5819 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 5820 { 5821 struct nfs4_stid *s; 5822 __be32 status = nfserr_bad_stateid; 5823 5824 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 5825 CLOSE_STATEID(stateid)) 5826 return status; 5827 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) 5828 return status; 5829 spin_lock(&cl->cl_lock); 5830 s = find_stateid_locked(cl, stateid); 5831 if (!s) 5832 goto out_unlock; 5833 status = nfsd4_stid_check_stateid_generation(stateid, s, 1); 5834 if (status) 5835 goto out_unlock; 5836 switch (s->sc_type) { 5837 case NFS4_DELEG_STID: 5838 status = nfs_ok; 5839 break; 5840 case NFS4_REVOKED_DELEG_STID: 5841 status = nfserr_deleg_revoked; 5842 break; 5843 case NFS4_OPEN_STID: 5844 case NFS4_LOCK_STID: 5845 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 5846 break; 5847 default: 5848 printk("unknown stateid type %x\n", s->sc_type); 5849 fallthrough; 5850 case NFS4_CLOSED_STID: 5851 case NFS4_CLOSED_DELEG_STID: 5852 status = nfserr_bad_stateid; 5853 } 5854 out_unlock: 5855 spin_unlock(&cl->cl_lock); 5856 return status; 5857 } 5858 5859 __be32 5860 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 5861 stateid_t *stateid, unsigned char typemask, 5862 struct nfs4_stid **s, struct nfsd_net *nn) 5863 { 5864 __be32 status; 5865 bool return_revoked = false; 5866 5867 /* 5868 * only return revoked delegations if explicitly asked. 5869 * otherwise we report revoked or bad_stateid status. 
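 *
 * (For example: a caller passing only NFS4_DELEG_STID has the mask
 * widened below to include NFS4_REVOKED_DELEG_STID, so a revoked
 * delegation is still found and reported as nfserr_deleg_revoked on
 * 4.1+ rather than as a generic nfserr_bad_stateid.)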
5870 */ 5871 if (typemask & NFS4_REVOKED_DELEG_STID) 5872 return_revoked = true; 5873 else if (typemask & NFS4_DELEG_STID) 5874 typemask |= NFS4_REVOKED_DELEG_STID; 5875 5876 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 5877 CLOSE_STATEID(stateid)) 5878 return nfserr_bad_stateid; 5879 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); 5880 if (status == nfserr_stale_clientid) { 5881 if (cstate->session) 5882 return nfserr_bad_stateid; 5883 return nfserr_stale_stateid; 5884 } 5885 if (status) 5886 return status; 5887 *s = find_stateid_by_type(cstate->clp, stateid, typemask); 5888 if (!*s) 5889 return nfserr_bad_stateid; 5890 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { 5891 nfs4_put_stid(*s); 5892 if (cstate->minorversion) 5893 return nfserr_deleg_revoked; 5894 return nfserr_bad_stateid; 5895 } 5896 return nfs_ok; 5897 } 5898 5899 static struct nfsd_file * 5900 nfs4_find_file(struct nfs4_stid *s, int flags) 5901 { 5902 if (!s) 5903 return NULL; 5904 5905 switch (s->sc_type) { 5906 case NFS4_DELEG_STID: 5907 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file)) 5908 return NULL; 5909 return nfsd_file_get(s->sc_file->fi_deleg_file); 5910 case NFS4_OPEN_STID: 5911 case NFS4_LOCK_STID: 5912 if (flags & RD_STATE) 5913 return find_readable_file(s->sc_file); 5914 else 5915 return find_writeable_file(s->sc_file); 5916 } 5917 5918 return NULL; 5919 } 5920 5921 static __be32 5922 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) 5923 { 5924 __be32 status; 5925 5926 status = nfsd4_check_openowner_confirmed(ols); 5927 if (status) 5928 return status; 5929 return nfs4_check_openmode(ols, flags); 5930 } 5931 5932 static __be32 5933 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, 5934 struct nfsd_file **nfp, int flags) 5935 { 5936 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE; 5937 struct nfsd_file *nf; 5938 __be32 status; 5939 5940 nf = nfs4_find_file(s, flags); 5941 if (nf) { 5942 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, 5943 acc | NFSD_MAY_OWNER_OVERRIDE); 5944 if (status) { 5945 nfsd_file_put(nf); 5946 goto out; 5947 } 5948 } else { 5949 status = nfsd_file_acquire(rqstp, fhp, acc, &nf); 5950 if (status) 5951 return status; 5952 } 5953 *nfp = nf; 5954 out: 5955 return status; 5956 } 5957 static void 5958 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 5959 { 5960 WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID); 5961 if (!refcount_dec_and_test(&cps->cp_stateid.sc_count)) 5962 return; 5963 list_del(&cps->cp_list); 5964 idr_remove(&nn->s2s_cp_stateids, 5965 cps->cp_stateid.stid.si_opaque.so_id); 5966 kfree(cps); 5967 } 5968 /* 5969 * A READ from an inter server to server COPY will have a 5970 * copy stateid. Look up the copy notify stateid from the 5971 * idr structure and take a reference on it. 
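 *
 * (manage_cpntf_state() takes that reference only when @clp is NULL;
 * callers passing a non-NULL client are instead asking for the state
 * to be released via _free_cpntf_state_locked().)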
5972 */ 5973 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 5974 struct nfs4_client *clp, 5975 struct nfs4_cpntf_state **cps) 5976 { 5977 copy_stateid_t *cps_t; 5978 struct nfs4_cpntf_state *state = NULL; 5979 5980 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) 5981 return nfserr_bad_stateid; 5982 spin_lock(&nn->s2s_cp_lock); 5983 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); 5984 if (cps_t) { 5985 state = container_of(cps_t, struct nfs4_cpntf_state, 5986 cp_stateid); 5987 if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) { 5988 state = NULL; 5989 goto unlock; 5990 } 5991 if (!clp) 5992 refcount_inc(&state->cp_stateid.sc_count); 5993 else 5994 _free_cpntf_state_locked(nn, state); 5995 } 5996 unlock: 5997 spin_unlock(&nn->s2s_cp_lock); 5998 if (!state) 5999 return nfserr_bad_stateid; 6000 if (!clp && state) 6001 *cps = state; 6002 return 0; 6003 } 6004 6005 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, 6006 struct nfs4_stid **stid) 6007 { 6008 __be32 status; 6009 struct nfs4_cpntf_state *cps = NULL; 6010 struct nfs4_client *found; 6011 6012 status = manage_cpntf_state(nn, st, NULL, &cps); 6013 if (status) 6014 return status; 6015 6016 cps->cpntf_time = ktime_get_boottime_seconds(); 6017 6018 status = nfserr_expired; 6019 found = lookup_clientid(&cps->cp_p_clid, true, nn); 6020 if (!found) 6021 goto out; 6022 6023 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, 6024 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID); 6025 if (*stid) 6026 status = nfs_ok; 6027 else 6028 status = nfserr_bad_stateid; 6029 6030 put_client_renew(found); 6031 out: 6032 nfs4_put_cpntf_state(nn, cps); 6033 return status; 6034 } 6035 6036 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 6037 { 6038 spin_lock(&nn->s2s_cp_lock); 6039 _free_cpntf_state_locked(nn, cps); 6040 spin_unlock(&nn->s2s_cp_lock); 6041 } 6042 6043 /* 6044 * Checks for stateid operations 6045 */ 6046 __be32 6047 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 6048 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 6049 stateid_t *stateid, int flags, struct nfsd_file **nfp, 6050 struct nfs4_stid **cstid) 6051 { 6052 struct net *net = SVC_NET(rqstp); 6053 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6054 struct nfs4_stid *s = NULL; 6055 __be32 status; 6056 6057 if (nfp) 6058 *nfp = NULL; 6059 6060 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 6061 if (cstid) 6062 status = nfserr_bad_stateid; 6063 else 6064 status = check_special_stateids(net, fhp, stateid, 6065 flags); 6066 goto done; 6067 } 6068 6069 status = nfsd4_lookup_stateid(cstate, stateid, 6070 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 6071 &s, nn); 6072 if (status == nfserr_bad_stateid) 6073 status = find_cpntf_state(nn, stateid, &s); 6074 if (status) 6075 return status; 6076 status = nfsd4_stid_check_stateid_generation(stateid, s, 6077 nfsd4_has_session(cstate)); 6078 if (status) 6079 goto out; 6080 6081 switch (s->sc_type) { 6082 case NFS4_DELEG_STID: 6083 status = nfs4_check_delegmode(delegstateid(s), flags); 6084 break; 6085 case NFS4_OPEN_STID: 6086 case NFS4_LOCK_STID: 6087 status = nfs4_check_olstateid(openlockstateid(s), flags); 6088 break; 6089 default: 6090 status = nfserr_bad_stateid; 6091 break; 6092 } 6093 if (status) 6094 goto out; 6095 status = nfs4_check_fh(fhp, s); 6096 6097 done: 6098 if (status == nfs_ok && nfp) 6099 status = nfs4_check_file(rqstp, fhp, s, nfp, flags); 6100 out: 6101 if (s) { 6102 if (!status && cstid) 6103 *cstid = s; 6104 else 6105 
nfs4_put_stid(s); 6106 } 6107 return status; 6108 } 6109 6110 /* 6111 * Test if the stateid is valid 6112 */ 6113 __be32 6114 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6115 union nfsd4_op_u *u) 6116 { 6117 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; 6118 struct nfsd4_test_stateid_id *stateid; 6119 struct nfs4_client *cl = cstate->clp; 6120 6121 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 6122 stateid->ts_id_status = 6123 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 6124 6125 return nfs_ok; 6126 } 6127 6128 static __be32 6129 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 6130 { 6131 struct nfs4_ol_stateid *stp = openlockstateid(s); 6132 __be32 ret; 6133 6134 ret = nfsd4_lock_ol_stateid(stp); 6135 if (ret) 6136 goto out_put_stid; 6137 6138 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 6139 if (ret) 6140 goto out; 6141 6142 ret = nfserr_locks_held; 6143 if (check_for_locks(stp->st_stid.sc_file, 6144 lockowner(stp->st_stateowner))) 6145 goto out; 6146 6147 release_lock_stateid(stp); 6148 ret = nfs_ok; 6149 6150 out: 6151 mutex_unlock(&stp->st_mutex); 6152 out_put_stid: 6153 nfs4_put_stid(s); 6154 return ret; 6155 } 6156 6157 __be32 6158 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6159 union nfsd4_op_u *u) 6160 { 6161 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; 6162 stateid_t *stateid = &free_stateid->fr_stateid; 6163 struct nfs4_stid *s; 6164 struct nfs4_delegation *dp; 6165 struct nfs4_client *cl = cstate->clp; 6166 __be32 ret = nfserr_bad_stateid; 6167 6168 spin_lock(&cl->cl_lock); 6169 s = find_stateid_locked(cl, stateid); 6170 if (!s) 6171 goto out_unlock; 6172 spin_lock(&s->sc_lock); 6173 switch (s->sc_type) { 6174 case NFS4_DELEG_STID: 6175 ret = nfserr_locks_held; 6176 break; 6177 case NFS4_OPEN_STID: 6178 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 6179 if (ret) 6180 break; 6181 ret = nfserr_locks_held; 6182 break; 6183 case NFS4_LOCK_STID: 6184 spin_unlock(&s->sc_lock); 6185 refcount_inc(&s->sc_count); 6186 spin_unlock(&cl->cl_lock); 6187 ret = nfsd4_free_lock_stateid(stateid, s); 6188 goto out; 6189 case NFS4_REVOKED_DELEG_STID: 6190 spin_unlock(&s->sc_lock); 6191 dp = delegstateid(s); 6192 list_del_init(&dp->dl_recall_lru); 6193 spin_unlock(&cl->cl_lock); 6194 nfs4_put_stid(s); 6195 ret = nfs_ok; 6196 goto out; 6197 /* Default falls through and returns nfserr_bad_stateid */ 6198 } 6199 spin_unlock(&s->sc_lock); 6200 out_unlock: 6201 spin_unlock(&cl->cl_lock); 6202 out: 6203 return ret; 6204 } 6205 6206 static inline int 6207 setlkflg (int type) 6208 { 6209 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 
6210 RD_STATE : WR_STATE; 6211 } 6212 6213 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 6214 { 6215 struct svc_fh *current_fh = &cstate->current_fh; 6216 struct nfs4_stateowner *sop = stp->st_stateowner; 6217 __be32 status; 6218 6219 status = nfsd4_check_seqid(cstate, sop, seqid); 6220 if (status) 6221 return status; 6222 status = nfsd4_lock_ol_stateid(stp); 6223 if (status != nfs_ok) 6224 return status; 6225 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 6226 if (status == nfs_ok) 6227 status = nfs4_check_fh(current_fh, &stp->st_stid); 6228 if (status != nfs_ok) 6229 mutex_unlock(&stp->st_mutex); 6230 return status; 6231 } 6232 6233 /* 6234 * Checks for sequence id mutating operations. 6235 */ 6236 static __be32 6237 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 6238 stateid_t *stateid, char typemask, 6239 struct nfs4_ol_stateid **stpp, 6240 struct nfsd_net *nn) 6241 { 6242 __be32 status; 6243 struct nfs4_stid *s; 6244 struct nfs4_ol_stateid *stp = NULL; 6245 6246 trace_nfsd_preprocess(seqid, stateid); 6247 6248 *stpp = NULL; 6249 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); 6250 if (status) 6251 return status; 6252 stp = openlockstateid(s); 6253 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); 6254 6255 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 6256 if (!status) 6257 *stpp = stp; 6258 else 6259 nfs4_put_stid(&stp->st_stid); 6260 return status; 6261 } 6262 6263 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 6264 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 6265 { 6266 __be32 status; 6267 struct nfs4_openowner *oo; 6268 struct nfs4_ol_stateid *stp; 6269 6270 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 6271 NFS4_OPEN_STID, &stp, nn); 6272 if (status) 6273 return status; 6274 oo = openowner(stp->st_stateowner); 6275 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 6276 mutex_unlock(&stp->st_mutex); 6277 nfs4_put_stid(&stp->st_stid); 6278 return nfserr_bad_stateid; 6279 } 6280 *stpp = stp; 6281 return nfs_ok; 6282 } 6283 6284 __be32 6285 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6286 union nfsd4_op_u *u) 6287 { 6288 struct nfsd4_open_confirm *oc = &u->open_confirm; 6289 __be32 status; 6290 struct nfs4_openowner *oo; 6291 struct nfs4_ol_stateid *stp; 6292 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6293 6294 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 6295 cstate->current_fh.fh_dentry); 6296 6297 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 6298 if (status) 6299 return status; 6300 6301 status = nfs4_preprocess_seqid_op(cstate, 6302 oc->oc_seqid, &oc->oc_req_stateid, 6303 NFS4_OPEN_STID, &stp, nn); 6304 if (status) 6305 goto out; 6306 oo = openowner(stp->st_stateowner); 6307 status = nfserr_bad_stateid; 6308 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 6309 mutex_unlock(&stp->st_mutex); 6310 goto put_stateid; 6311 } 6312 oo->oo_flags |= NFS4_OO_CONFIRMED; 6313 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 6314 mutex_unlock(&stp->st_mutex); 6315 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); 6316 nfsd4_client_record_create(oo->oo_owner.so_client); 6317 status = nfs_ok; 6318 put_stateid: 6319 nfs4_put_stid(&stp->st_stid); 6320 out: 6321 nfsd4_bump_seqid(cstate, status); 6322 return status; 6323 } 
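/*
 * OPEN_DOWNGRADE helpers: nfs4_stateid_downgrade() clears every
 * share-access bit not implied by the downgraded mode, and
 * nfs4_stateid_downgrade_bit() drops the matching nfs4_file access
 * reference for each bit it clears. Downgrading to
 * NFS4_SHARE_ACCESS_BOTH is a no-op.
 */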
6324 6325 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 6326 { 6327 if (!test_access(access, stp)) 6328 return; 6329 nfs4_file_put_access(stp->st_stid.sc_file, access); 6330 clear_access(access, stp); 6331 } 6332 6333 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 6334 { 6335 switch (to_access) { 6336 case NFS4_SHARE_ACCESS_READ: 6337 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 6338 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 6339 break; 6340 case NFS4_SHARE_ACCESS_WRITE: 6341 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 6342 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 6343 break; 6344 case NFS4_SHARE_ACCESS_BOTH: 6345 break; 6346 default: 6347 WARN_ON_ONCE(1); 6348 } 6349 } 6350 6351 __be32 6352 nfsd4_open_downgrade(struct svc_rqst *rqstp, 6353 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 6354 { 6355 struct nfsd4_open_downgrade *od = &u->open_downgrade; 6356 __be32 status; 6357 struct nfs4_ol_stateid *stp; 6358 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6359 6360 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 6361 cstate->current_fh.fh_dentry); 6362 6363 /* We don't yet support WANT bits: */ 6364 if (od->od_deleg_want) 6365 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 6366 od->od_deleg_want); 6367 6368 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 6369 &od->od_stateid, &stp, nn); 6370 if (status) 6371 goto out; 6372 status = nfserr_inval; 6373 if (!test_access(od->od_share_access, stp)) { 6374 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 6375 stp->st_access_bmap, od->od_share_access); 6376 goto put_stateid; 6377 } 6378 if (!test_deny(od->od_share_deny, stp)) { 6379 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 6380 stp->st_deny_bmap, od->od_share_deny); 6381 goto put_stateid; 6382 } 6383 nfs4_stateid_downgrade(stp, od->od_share_access); 6384 reset_union_bmap_deny(od->od_share_deny, stp); 6385 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 6386 status = nfs_ok; 6387 put_stateid: 6388 mutex_unlock(&stp->st_mutex); 6389 nfs4_put_stid(&stp->st_stid); 6390 out: 6391 nfsd4_bump_seqid(cstate, status); 6392 return status; 6393 } 6394 6395 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 6396 { 6397 struct nfs4_client *clp = s->st_stid.sc_client; 6398 bool unhashed; 6399 LIST_HEAD(reaplist); 6400 6401 spin_lock(&clp->cl_lock); 6402 unhashed = unhash_open_stateid(s, &reaplist); 6403 6404 if (clp->cl_minorversion) { 6405 if (unhashed) 6406 put_ol_stateid_locked(s, &reaplist); 6407 spin_unlock(&clp->cl_lock); 6408 free_ol_stateid_reaplist(&reaplist); 6409 } else { 6410 spin_unlock(&clp->cl_lock); 6411 free_ol_stateid_reaplist(&reaplist); 6412 if (unhashed) 6413 move_to_close_lru(s, clp->net); 6414 } 6415 } 6416 6417 /* 6418 * nfs4_unlock_state() called after encode 6419 */ 6420 __be32 6421 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6422 union nfsd4_op_u *u) 6423 { 6424 struct nfsd4_close *close = &u->close; 6425 __be32 status; 6426 struct nfs4_ol_stateid *stp; 6427 struct net *net = SVC_NET(rqstp); 6428 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6429 6430 dprintk("NFSD: nfsd4_close on file %pd\n", 6431 cstate->current_fh.fh_dentry); 6432 6433 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 6434 &close->cl_stateid, 6435 
NFS4_OPEN_STID|NFS4_CLOSED_STID, 6436 &stp, nn); 6437 nfsd4_bump_seqid(cstate, status); 6438 if (status) 6439 goto out; 6440 6441 stp->st_stid.sc_type = NFS4_CLOSED_STID; 6442 6443 /* 6444 * Technically we don't _really_ have to increment or copy it, since 6445 * it should just be gone after this operation and we clobber the 6446 * copied value below, but we continue to do so here just to ensure 6447 * that racing ops see that there was a state change. 6448 */ 6449 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 6450 6451 nfsd4_close_open_stateid(stp); 6452 mutex_unlock(&stp->st_mutex); 6453 6454 /* v4.1+ suggests that we send a special stateid in here, since the 6455 * clients should just ignore this anyway. Since this is not useful 6456 * for v4.0 clients either, we set it to the special close_stateid 6457 * universally. 6458 * 6459 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5 6460 */ 6461 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); 6462 6463 /* put reference from nfs4_preprocess_seqid_op */ 6464 nfs4_put_stid(&stp->st_stid); 6465 out: 6466 return status; 6467 } 6468 6469 __be32 6470 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6471 union nfsd4_op_u *u) 6472 { 6473 struct nfsd4_delegreturn *dr = &u->delegreturn; 6474 struct nfs4_delegation *dp; 6475 stateid_t *stateid = &dr->dr_stateid; 6476 struct nfs4_stid *s; 6477 __be32 status; 6478 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6479 6480 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 6481 return status; 6482 6483 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); 6484 if (status) 6485 goto out; 6486 dp = delegstateid(s); 6487 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); 6488 if (status) 6489 goto put_stateid; 6490 6491 destroy_delegation(dp); 6492 put_stateid: 6493 nfs4_put_stid(&dp->dl_stid); 6494 out: 6495 return status; 6496 } 6497 6498 /* last octet in a range */ 6499 static inline u64 6500 last_byte_offset(u64 start, u64 len) 6501 { 6502 u64 end; 6503 6504 WARN_ON_ONCE(!len); 6505 end = start + len; 6506 return end > start ? end - 1: NFS4_MAX_UINT64; 6507 } 6508 6509 /* 6510 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 6511 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 6512 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 6513 * locking, this prevents us from being completely protocol-compliant. The 6514 * real solution to this problem is to start using unsigned file offsets in 6515 * the VFS, but this is a very deep change! 
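 *
 * (Concretely: a LOCK of length NFS4_MAX_UINT64 at offset 0 produces a
 * last byte offset that goes negative when cast to loff_t, which
 * nfs4_transform_lock_offset() below clamps to OFFSET_MAX; the locked
 * range is silently truncated at byte 2^63 - 1.)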
6516 */ 6517 static inline void 6518 nfs4_transform_lock_offset(struct file_lock *lock) 6519 { 6520 if (lock->fl_start < 0) 6521 lock->fl_start = OFFSET_MAX; 6522 if (lock->fl_end < 0) 6523 lock->fl_end = OFFSET_MAX; 6524 } 6525 6526 static fl_owner_t 6527 nfsd4_fl_get_owner(fl_owner_t owner) 6528 { 6529 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 6530 6531 nfs4_get_stateowner(&lo->lo_owner); 6532 return owner; 6533 } 6534 6535 static void 6536 nfsd4_fl_put_owner(fl_owner_t owner) 6537 { 6538 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 6539 6540 if (lo) 6541 nfs4_put_stateowner(&lo->lo_owner); 6542 } 6543 6544 static void 6545 nfsd4_lm_notify(struct file_lock *fl) 6546 { 6547 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; 6548 struct net *net = lo->lo_owner.so_client->net; 6549 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6550 struct nfsd4_blocked_lock *nbl = container_of(fl, 6551 struct nfsd4_blocked_lock, nbl_lock); 6552 bool queue = false; 6553 6554 /* An empty list means that something else is going to be using it */ 6555 spin_lock(&nn->blocked_locks_lock); 6556 if (!list_empty(&nbl->nbl_list)) { 6557 list_del_init(&nbl->nbl_list); 6558 list_del_init(&nbl->nbl_lru); 6559 queue = true; 6560 } 6561 spin_unlock(&nn->blocked_locks_lock); 6562 6563 if (queue) { 6564 trace_nfsd_cb_notify_lock(lo, nbl); 6565 nfsd4_run_cb(&nbl->nbl_cb); 6566 } 6567 } 6568 6569 static const struct lock_manager_operations nfsd_posix_mng_ops = { 6570 .lm_notify = nfsd4_lm_notify, 6571 .lm_get_owner = nfsd4_fl_get_owner, 6572 .lm_put_owner = nfsd4_fl_put_owner, 6573 }; 6574 6575 static inline void 6576 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 6577 { 6578 struct nfs4_lockowner *lo; 6579 6580 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 6581 lo = (struct nfs4_lockowner *) fl->fl_owner; 6582 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, 6583 GFP_KERNEL); 6584 if (!deny->ld_owner.data) 6585 /* We just don't care that much */ 6586 goto nevermind; 6587 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 6588 } else { 6589 nevermind: 6590 deny->ld_owner.len = 0; 6591 deny->ld_owner.data = NULL; 6592 deny->ld_clientid.cl_boot = 0; 6593 deny->ld_clientid.cl_id = 0; 6594 } 6595 deny->ld_start = fl->fl_start; 6596 deny->ld_length = NFS4_MAX_UINT64; 6597 if (fl->fl_end != NFS4_MAX_UINT64) 6598 deny->ld_length = fl->fl_end - fl->fl_start + 1; 6599 deny->ld_type = NFS4_READ_LT; 6600 if (fl->fl_type != F_RDLCK) 6601 deny->ld_type = NFS4_WRITE_LT; 6602 } 6603 6604 static struct nfs4_lockowner * 6605 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) 6606 { 6607 unsigned int strhashval = ownerstr_hashval(owner); 6608 struct nfs4_stateowner *so; 6609 6610 lockdep_assert_held(&clp->cl_lock); 6611 6612 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 6613 so_strhash) { 6614 if (so->so_is_open_owner) 6615 continue; 6616 if (same_owner_str(so, owner)) 6617 return lockowner(nfs4_get_stateowner(so)); 6618 } 6619 return NULL; 6620 } 6621 6622 static struct nfs4_lockowner * 6623 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) 6624 { 6625 struct nfs4_lockowner *lo; 6626 6627 spin_lock(&clp->cl_lock); 6628 lo = find_lockowner_str_locked(clp, owner); 6629 spin_unlock(&clp->cl_lock); 6630 return lo; 6631 } 6632 6633 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 6634 { 6635 unhash_lockowner_locked(lockowner(sop)); 6636 } 6637 6638 static void 
nfs4_free_lockowner(struct nfs4_stateowner *sop) 6639 { 6640 struct nfs4_lockowner *lo = lockowner(sop); 6641 6642 kmem_cache_free(lockowner_slab, lo); 6643 } 6644 6645 static const struct nfs4_stateowner_operations lockowner_ops = { 6646 .so_unhash = nfs4_unhash_lockowner, 6647 .so_free = nfs4_free_lockowner, 6648 }; 6649 6650 /* 6651 * Alloc a lock owner structure. 6652 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 6653 * occurred. 6654 * 6655 * strhashval = ownerstr_hashval 6656 */ 6657 static struct nfs4_lockowner * 6658 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 6659 struct nfs4_ol_stateid *open_stp, 6660 struct nfsd4_lock *lock) 6661 { 6662 struct nfs4_lockowner *lo, *ret; 6663 6664 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 6665 if (!lo) 6666 return NULL; 6667 INIT_LIST_HEAD(&lo->lo_blocked); 6668 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 6669 lo->lo_owner.so_is_open_owner = 0; 6670 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 6671 lo->lo_owner.so_ops = &lockowner_ops; 6672 spin_lock(&clp->cl_lock); 6673 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); 6674 if (ret == NULL) { 6675 list_add(&lo->lo_owner.so_strhash, 6676 &clp->cl_ownerstr_hashtbl[strhashval]); 6677 ret = lo; 6678 } else 6679 nfs4_free_stateowner(&lo->lo_owner); 6680 6681 spin_unlock(&clp->cl_lock); 6682 return ret; 6683 } 6684 6685 static struct nfs4_ol_stateid * 6686 find_lock_stateid(const struct nfs4_lockowner *lo, 6687 const struct nfs4_ol_stateid *ost) 6688 { 6689 struct nfs4_ol_stateid *lst; 6690 6691 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); 6692 6693 /* If ost is not hashed, ost->st_locks will not be valid */ 6694 if (!nfs4_ol_stateid_unhashed(ost)) 6695 list_for_each_entry(lst, &ost->st_locks, st_locks) { 6696 if (lst->st_stateowner == &lo->lo_owner) { 6697 refcount_inc(&lst->st_stid.sc_count); 6698 return lst; 6699 } 6700 } 6701 return NULL; 6702 } 6703 6704 static struct nfs4_ol_stateid * 6705 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 6706 struct nfs4_file *fp, struct inode *inode, 6707 struct nfs4_ol_stateid *open_stp) 6708 { 6709 struct nfs4_client *clp = lo->lo_owner.so_client; 6710 struct nfs4_ol_stateid *retstp; 6711 6712 mutex_init(&stp->st_mutex); 6713 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 6714 retry: 6715 spin_lock(&clp->cl_lock); 6716 if (nfs4_ol_stateid_unhashed(open_stp)) 6717 goto out_close; 6718 retstp = find_lock_stateid(lo, open_stp); 6719 if (retstp) 6720 goto out_found; 6721 refcount_inc(&stp->st_stid.sc_count); 6722 stp->st_stid.sc_type = NFS4_LOCK_STID; 6723 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 6724 get_nfs4_file(fp); 6725 stp->st_stid.sc_file = fp; 6726 stp->st_access_bmap = 0; 6727 stp->st_deny_bmap = open_stp->st_deny_bmap; 6728 stp->st_openstp = open_stp; 6729 spin_lock(&fp->fi_lock); 6730 list_add(&stp->st_locks, &open_stp->st_locks); 6731 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 6732 list_add(&stp->st_perfile, &fp->fi_stateids); 6733 spin_unlock(&fp->fi_lock); 6734 spin_unlock(&clp->cl_lock); 6735 return stp; 6736 out_found: 6737 spin_unlock(&clp->cl_lock); 6738 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 6739 nfs4_put_stid(&retstp->st_stid); 6740 goto retry; 6741 } 6742 /* To keep mutex tracking happy */ 6743 mutex_unlock(&stp->st_mutex); 6744 return retstp; 6745 out_close: 6746 spin_unlock(&clp->cl_lock); 6747 mutex_unlock(&stp->st_mutex); 6748 return NULL; 6749 } 6750 6751 
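/*
 * Lookup-before-create: search for an existing lock stateid under
 * cl_lock first, and allocate a new one outside the lock only on a
 * miss. init_lock_stateid() resolves any race with a concurrent
 * creator; if it returns somebody else's stateid, the local allocation
 * is put and *new stays false.
 */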
static struct nfs4_ol_stateid * 6752 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, 6753 struct inode *inode, struct nfs4_ol_stateid *ost, 6754 bool *new) 6755 { 6756 struct nfs4_stid *ns = NULL; 6757 struct nfs4_ol_stateid *lst; 6758 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 6759 struct nfs4_client *clp = oo->oo_owner.so_client; 6760 6761 *new = false; 6762 spin_lock(&clp->cl_lock); 6763 lst = find_lock_stateid(lo, ost); 6764 spin_unlock(&clp->cl_lock); 6765 if (lst != NULL) { 6766 if (nfsd4_lock_ol_stateid(lst) == nfs_ok) 6767 goto out; 6768 nfs4_put_stid(&lst->st_stid); 6769 } 6770 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); 6771 if (ns == NULL) 6772 return NULL; 6773 6774 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost); 6775 if (lst == openlockstateid(ns)) 6776 *new = true; 6777 else 6778 nfs4_put_stid(ns); 6779 out: 6780 return lst; 6781 } 6782 6783 static int 6784 check_lock_length(u64 offset, u64 length) 6785 { 6786 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 6787 (length > ~offset))); 6788 } 6789 6790 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 6791 { 6792 struct nfs4_file *fp = lock_stp->st_stid.sc_file; 6793 6794 lockdep_assert_held(&fp->fi_lock); 6795 6796 if (test_access(access, lock_stp)) 6797 return; 6798 __nfs4_file_get_access(fp, access); 6799 set_access(access, lock_stp); 6800 } 6801 6802 static __be32 6803 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 6804 struct nfs4_ol_stateid *ost, 6805 struct nfsd4_lock *lock, 6806 struct nfs4_ol_stateid **plst, bool *new) 6807 { 6808 __be32 status; 6809 struct nfs4_file *fi = ost->st_stid.sc_file; 6810 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 6811 struct nfs4_client *cl = oo->oo_owner.so_client; 6812 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 6813 struct nfs4_lockowner *lo; 6814 struct nfs4_ol_stateid *lst; 6815 unsigned int strhashval; 6816 6817 lo = find_lockowner_str(cl, &lock->lk_new_owner); 6818 if (!lo) { 6819 strhashval = ownerstr_hashval(&lock->lk_new_owner); 6820 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 6821 if (lo == NULL) 6822 return nfserr_jukebox; 6823 } else { 6824 /* with an existing lockowner, seqids must be the same */ 6825 status = nfserr_bad_seqid; 6826 if (!cstate->minorversion && 6827 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 6828 goto out; 6829 } 6830 6831 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 6832 if (lst == NULL) { 6833 status = nfserr_jukebox; 6834 goto out; 6835 } 6836 6837 status = nfs_ok; 6838 *plst = lst; 6839 out: 6840 nfs4_put_stateowner(&lo->lo_owner); 6841 return status; 6842 } 6843 6844 /* 6845 * LOCK operation 6846 */ 6847 __be32 6848 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6849 union nfsd4_op_u *u) 6850 { 6851 struct nfsd4_lock *lock = &u->lock; 6852 struct nfs4_openowner *open_sop = NULL; 6853 struct nfs4_lockowner *lock_sop = NULL; 6854 struct nfs4_ol_stateid *lock_stp = NULL; 6855 struct nfs4_ol_stateid *open_stp = NULL; 6856 struct nfs4_file *fp; 6857 struct nfsd_file *nf = NULL; 6858 struct nfsd4_blocked_lock *nbl = NULL; 6859 struct file_lock *file_lock = NULL; 6860 struct file_lock *conflock = NULL; 6861 __be32 status = 0; 6862 int lkflg; 6863 int err; 6864 bool new = false; 6865 unsigned char fl_type; 6866 unsigned int fl_flags = FL_POSIX; 6867 struct net *net = SVC_NET(rqstp); 6868 struct nfsd_net *nn = net_generic(net, 
nfsd_net_id); 6869 6870 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 6871 (long long) lock->lk_offset, 6872 (long long) lock->lk_length); 6873 6874 if (check_lock_length(lock->lk_offset, lock->lk_length)) 6875 return nfserr_inval; 6876 6877 if ((status = fh_verify(rqstp, &cstate->current_fh, 6878 S_IFREG, NFSD_MAY_LOCK))) { 6879 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 6880 return status; 6881 } 6882 6883 if (lock->lk_is_new) { 6884 if (nfsd4_has_session(cstate)) 6885 /* See rfc 5661 18.10.3: given clientid is ignored: */ 6886 memcpy(&lock->lk_new_clientid, 6887 &cstate->clp->cl_clientid, 6888 sizeof(clientid_t)); 6889 6890 /* validate and update open stateid and open seqid */ 6891 status = nfs4_preprocess_confirmed_seqid_op(cstate, 6892 lock->lk_new_open_seqid, 6893 &lock->lk_new_open_stateid, 6894 &open_stp, nn); 6895 if (status) 6896 goto out; 6897 mutex_unlock(&open_stp->st_mutex); 6898 open_sop = openowner(open_stp->st_stateowner); 6899 status = nfserr_bad_stateid; 6900 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 6901 &lock->lk_new_clientid)) 6902 goto out; 6903 status = lookup_or_create_lock_state(cstate, open_stp, lock, 6904 &lock_stp, &new); 6905 } else { 6906 status = nfs4_preprocess_seqid_op(cstate, 6907 lock->lk_old_lock_seqid, 6908 &lock->lk_old_lock_stateid, 6909 NFS4_LOCK_STID, &lock_stp, nn); 6910 } 6911 if (status) 6912 goto out; 6913 lock_sop = lockowner(lock_stp->st_stateowner); 6914 6915 lkflg = setlkflg(lock->lk_type); 6916 status = nfs4_check_openmode(lock_stp, lkflg); 6917 if (status) 6918 goto out; 6919 6920 status = nfserr_grace; 6921 if (locks_in_grace(net) && !lock->lk_reclaim) 6922 goto out; 6923 status = nfserr_no_grace; 6924 if (!locks_in_grace(net) && lock->lk_reclaim) 6925 goto out; 6926 6927 if (lock->lk_reclaim) 6928 fl_flags |= FL_RECLAIM; 6929 6930 fp = lock_stp->st_stid.sc_file; 6931 switch (lock->lk_type) { 6932 case NFS4_READW_LT: 6933 if (nfsd4_has_session(cstate)) 6934 fl_flags |= FL_SLEEP; 6935 fallthrough; 6936 case NFS4_READ_LT: 6937 spin_lock(&fp->fi_lock); 6938 nf = find_readable_file_locked(fp); 6939 if (nf) 6940 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 6941 spin_unlock(&fp->fi_lock); 6942 fl_type = F_RDLCK; 6943 break; 6944 case NFS4_WRITEW_LT: 6945 if (nfsd4_has_session(cstate)) 6946 fl_flags |= FL_SLEEP; 6947 fallthrough; 6948 case NFS4_WRITE_LT: 6949 spin_lock(&fp->fi_lock); 6950 nf = find_writeable_file_locked(fp); 6951 if (nf) 6952 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 6953 spin_unlock(&fp->fi_lock); 6954 fl_type = F_WRLCK; 6955 break; 6956 default: 6957 status = nfserr_inval; 6958 goto out; 6959 } 6960 6961 if (!nf) { 6962 status = nfserr_openmode; 6963 goto out; 6964 } 6965 6966 /* 6967 * Most filesystems with their own ->lock operations will block 6968 * the nfsd thread waiting to acquire the lock. 
That leads to 6969 * deadlocks (we don't want every nfsd thread tied up waiting 6970 * for file locks), so don't attempt blocking lock notifications 6971 * on those filesystems: 6972 */ 6973 if (nf->nf_file->f_op->lock) 6974 fl_flags &= ~FL_SLEEP; 6975 6976 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); 6977 if (!nbl) { 6978 dprintk("NFSD: %s: unable to allocate block!\n", __func__); 6979 status = nfserr_jukebox; 6980 goto out; 6981 } 6982 6983 file_lock = &nbl->nbl_lock; 6984 file_lock->fl_type = fl_type; 6985 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 6986 file_lock->fl_pid = current->tgid; 6987 file_lock->fl_file = nf->nf_file; 6988 file_lock->fl_flags = fl_flags; 6989 file_lock->fl_lmops = &nfsd_posix_mng_ops; 6990 file_lock->fl_start = lock->lk_offset; 6991 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 6992 nfs4_transform_lock_offset(file_lock); 6993 6994 conflock = locks_alloc_lock(); 6995 if (!conflock) { 6996 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 6997 status = nfserr_jukebox; 6998 goto out; 6999 } 7000 7001 if (fl_flags & FL_SLEEP) { 7002 nbl->nbl_time = ktime_get_boottime_seconds(); 7003 spin_lock(&nn->blocked_locks_lock); 7004 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 7005 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 7006 kref_get(&nbl->nbl_kref); 7007 spin_unlock(&nn->blocked_locks_lock); 7008 } 7009 7010 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); 7011 switch (err) { 7012 case 0: /* success! */ 7013 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); 7014 status = 0; 7015 if (lock->lk_reclaim) 7016 nn->somebody_reclaimed = true; 7017 break; 7018 case FILE_LOCK_DEFERRED: 7019 kref_put(&nbl->nbl_kref, free_nbl); 7020 nbl = NULL; 7021 fallthrough; 7022 case -EAGAIN: /* conflock holds conflicting lock */ 7023 status = nfserr_denied; 7024 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 7025 nfs4_set_lock_denied(conflock, &lock->lk_denied); 7026 break; 7027 case -EDEADLK: 7028 status = nfserr_deadlock; 7029 break; 7030 default: 7031 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 7032 status = nfserrno(err); 7033 break; 7034 } 7035 out: 7036 if (nbl) { 7037 /* dequeue it if we queued it before */ 7038 if (fl_flags & FL_SLEEP) { 7039 spin_lock(&nn->blocked_locks_lock); 7040 if (!list_empty(&nbl->nbl_list) && 7041 !list_empty(&nbl->nbl_lru)) { 7042 list_del_init(&nbl->nbl_list); 7043 list_del_init(&nbl->nbl_lru); 7044 kref_put(&nbl->nbl_kref, free_nbl); 7045 } 7046 /* nbl can use one of lists to be linked to reaplist */ 7047 spin_unlock(&nn->blocked_locks_lock); 7048 } 7049 free_blocked_lock(nbl); 7050 } 7051 if (nf) 7052 nfsd_file_put(nf); 7053 if (lock_stp) { 7054 /* Bump seqid manually if the 4.0 replay owner is openowner */ 7055 if (cstate->replay_owner && 7056 cstate->replay_owner != &lock_sop->lo_owner && 7057 seqid_mutating_err(ntohl(status))) 7058 lock_sop->lo_owner.so_seqid++; 7059 7060 /* 7061 * If this is a new, never-before-used stateid, and we are 7062 * returning an error, then just go ahead and release it. 
7063 */ 7064 if (status && new) 7065 release_lock_stateid(lock_stp); 7066 7067 mutex_unlock(&lock_stp->st_mutex); 7068 7069 nfs4_put_stid(&lock_stp->st_stid); 7070 } 7071 if (open_stp) 7072 nfs4_put_stid(&open_stp->st_stid); 7073 nfsd4_bump_seqid(cstate, status); 7074 if (conflock) 7075 locks_free_lock(conflock); 7076 return status; 7077 } 7078 7079 /* 7080 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 7081 * so we do a temporary open here just to get an open file to pass to 7082 * vfs_test_lock. 7083 */ 7084 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 7085 { 7086 struct nfsd_file *nf; 7087 __be32 err; 7088 7089 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); 7090 if (err) 7091 return err; 7092 fh_lock(fhp); /* to block new leases till after test_lock: */ 7093 err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode, 7094 NFSD_MAY_READ)); 7095 if (err) 7096 goto out; 7097 lock->fl_file = nf->nf_file; 7098 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); 7099 lock->fl_file = NULL; 7100 out: 7101 fh_unlock(fhp); 7102 nfsd_file_put(nf); 7103 return err; 7104 } 7105 7106 /* 7107 * LOCKT operation 7108 */ 7109 __be32 7110 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7111 union nfsd4_op_u *u) 7112 { 7113 struct nfsd4_lockt *lockt = &u->lockt; 7114 struct file_lock *file_lock = NULL; 7115 struct nfs4_lockowner *lo = NULL; 7116 __be32 status; 7117 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7118 7119 if (locks_in_grace(SVC_NET(rqstp))) 7120 return nfserr_grace; 7121 7122 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 7123 return nfserr_inval; 7124 7125 if (!nfsd4_has_session(cstate)) { 7126 status = set_client(&lockt->lt_clientid, cstate, nn); 7127 if (status) 7128 goto out; 7129 } 7130 7131 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 7132 goto out; 7133 7134 file_lock = locks_alloc_lock(); 7135 if (!file_lock) { 7136 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 7137 status = nfserr_jukebox; 7138 goto out; 7139 } 7140 7141 switch (lockt->lt_type) { 7142 case NFS4_READ_LT: 7143 case NFS4_READW_LT: 7144 file_lock->fl_type = F_RDLCK; 7145 break; 7146 case NFS4_WRITE_LT: 7147 case NFS4_WRITEW_LT: 7148 file_lock->fl_type = F_WRLCK; 7149 break; 7150 default: 7151 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 7152 status = nfserr_inval; 7153 goto out; 7154 } 7155 7156 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); 7157 if (lo) 7158 file_lock->fl_owner = (fl_owner_t)lo; 7159 file_lock->fl_pid = current->tgid; 7160 file_lock->fl_flags = FL_POSIX; 7161 7162 file_lock->fl_start = lockt->lt_offset; 7163 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 7164 7165 nfs4_transform_lock_offset(file_lock); 7166 7167 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 7168 if (status) 7169 goto out; 7170 7171 if (file_lock->fl_type != F_UNLCK) { 7172 status = nfserr_denied; 7173 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 7174 } 7175 out: 7176 if (lo) 7177 nfs4_put_stateowner(&lo->lo_owner); 7178 if (file_lock) 7179 locks_free_lock(file_lock); 7180 return status; 7181 } 7182 7183 __be32 7184 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7185 union nfsd4_op_u *u) 7186 { 7187 struct nfsd4_locku *locku = &u->locku; 7188 struct nfs4_ol_stateid *stp; 7189 struct nfsd_file *nf = NULL; 7190 struct file_lock *file_lock = NULL; 7191 __be32 status; 7192 int 
err; 7193 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7194 7195 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 7196 (long long) locku->lu_offset, 7197 (long long) locku->lu_length); 7198 7199 if (check_lock_length(locku->lu_offset, locku->lu_length)) 7200 return nfserr_inval; 7201 7202 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 7203 &locku->lu_stateid, NFS4_LOCK_STID, 7204 &stp, nn); 7205 if (status) 7206 goto out; 7207 nf = find_any_file(stp->st_stid.sc_file); 7208 if (!nf) { 7209 status = nfserr_lock_range; 7210 goto put_stateid; 7211 } 7212 file_lock = locks_alloc_lock(); 7213 if (!file_lock) { 7214 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 7215 status = nfserr_jukebox; 7216 goto put_file; 7217 } 7218 7219 file_lock->fl_type = F_UNLCK; 7220 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); 7221 file_lock->fl_pid = current->tgid; 7222 file_lock->fl_file = nf->nf_file; 7223 file_lock->fl_flags = FL_POSIX; 7224 file_lock->fl_lmops = &nfsd_posix_mng_ops; 7225 file_lock->fl_start = locku->lu_offset; 7226 7227 file_lock->fl_end = last_byte_offset(locku->lu_offset, 7228 locku->lu_length); 7229 nfs4_transform_lock_offset(file_lock); 7230 7231 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); 7232 if (err) { 7233 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 7234 goto out_nfserr; 7235 } 7236 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); 7237 put_file: 7238 nfsd_file_put(nf); 7239 put_stateid: 7240 mutex_unlock(&stp->st_mutex); 7241 nfs4_put_stid(&stp->st_stid); 7242 out: 7243 nfsd4_bump_seqid(cstate, status); 7244 if (file_lock) 7245 locks_free_lock(file_lock); 7246 return status; 7247 7248 out_nfserr: 7249 status = nfserrno(err); 7250 goto put_file; 7251 } 7252 7253 /* 7254 * returns 7255 * true: locks held by lockowner 7256 * false: no locks held by lockowner 7257 */ 7258 static bool 7259 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 7260 { 7261 struct file_lock *fl; 7262 int status = false; 7263 struct nfsd_file *nf = find_any_file(fp); 7264 struct inode *inode; 7265 struct file_lock_context *flctx; 7266 7267 if (!nf) { 7268 /* Any valid lock stateid should have some sort of access */ 7269 WARN_ON_ONCE(1); 7270 return status; 7271 } 7272 7273 inode = locks_inode(nf->nf_file); 7274 flctx = inode->i_flctx; 7275 7276 if (flctx && !list_empty_careful(&flctx->flc_posix)) { 7277 spin_lock(&flctx->flc_lock); 7278 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 7279 if (fl->fl_owner == (fl_owner_t)lowner) { 7280 status = true; 7281 break; 7282 } 7283 } 7284 spin_unlock(&flctx->flc_lock); 7285 } 7286 nfsd_file_put(nf); 7287 return status; 7288 } 7289 7290 __be32 7291 nfsd4_release_lockowner(struct svc_rqst *rqstp, 7292 struct nfsd4_compound_state *cstate, 7293 union nfsd4_op_u *u) 7294 { 7295 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; 7296 clientid_t *clid = &rlockowner->rl_clientid; 7297 struct nfs4_stateowner *sop; 7298 struct nfs4_lockowner *lo = NULL; 7299 struct nfs4_ol_stateid *stp; 7300 struct xdr_netobj *owner = &rlockowner->rl_owner; 7301 unsigned int hashval = ownerstr_hashval(owner); 7302 __be32 status; 7303 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7304 struct nfs4_client *clp; 7305 LIST_HEAD (reaplist); 7306 7307 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 7308 clid->cl_boot, clid->cl_id); 7309 7310 status = set_client(clid, cstate, nn); 7311 if (status) 7312 return 
status; 7313 7314 clp = cstate->clp; 7315 /* Find the matching lock stateowner */ 7316 spin_lock(&clp->cl_lock); 7317 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval], 7318 so_strhash) { 7319 7320 if (sop->so_is_open_owner || !same_owner_str(sop, owner)) 7321 continue; 7322 7323 /* see if there are still any locks associated with it */ 7324 lo = lockowner(sop); 7325 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { 7326 if (check_for_locks(stp->st_stid.sc_file, lo)) { 7327 status = nfserr_locks_held; 7328 spin_unlock(&clp->cl_lock); 7329 return status; 7330 } 7331 } 7332 7333 nfs4_get_stateowner(sop); 7334 break; 7335 } 7336 if (!lo) { 7337 spin_unlock(&clp->cl_lock); 7338 return status; 7339 } 7340 7341 unhash_lockowner_locked(lo); 7342 while (!list_empty(&lo->lo_owner.so_stateids)) { 7343 stp = list_first_entry(&lo->lo_owner.so_stateids, 7344 struct nfs4_ol_stateid, 7345 st_perstateowner); 7346 WARN_ON(!unhash_lock_stateid(stp)); 7347 put_ol_stateid_locked(stp, &reaplist); 7348 } 7349 spin_unlock(&clp->cl_lock); 7350 free_ol_stateid_reaplist(&reaplist); 7351 remove_blocked_locks(lo); 7352 nfs4_put_stateowner(&lo->lo_owner); 7353 7354 return status; 7355 } 7356 7357 static inline struct nfs4_client_reclaim * 7358 alloc_reclaim(void) 7359 { 7360 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 7361 } 7362 7363 bool 7364 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) 7365 { 7366 struct nfs4_client_reclaim *crp; 7367 7368 crp = nfsd4_find_reclaim_client(name, nn); 7369 return (crp && crp->cr_clp); 7370 } 7371 7372 /* 7373 * failure => all reset bets are off, nfserr_no_grace... 7374 * 7375 * The caller is responsible for freeing name.data if NULL is returned (it 7376 * will be freed in nfs4_remove_reclaim_record in the normal case). 7377 */ 7378 struct nfs4_client_reclaim * 7379 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, 7380 struct nfsd_net *nn) 7381 { 7382 unsigned int strhashval; 7383 struct nfs4_client_reclaim *crp; 7384 7385 crp = alloc_reclaim(); 7386 if (crp) { 7387 strhashval = clientstr_hashval(name); 7388 INIT_LIST_HEAD(&crp->cr_strhash); 7389 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 7390 crp->cr_name.data = name.data; 7391 crp->cr_name.len = name.len; 7392 crp->cr_princhash.data = princhash.data; 7393 crp->cr_princhash.len = princhash.len; 7394 crp->cr_clp = NULL; 7395 nn->reclaim_str_hashtbl_size++; 7396 } 7397 return crp; 7398 } 7399 7400 void 7401 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 7402 { 7403 list_del(&crp->cr_strhash); 7404 kfree(crp->cr_name.data); 7405 kfree(crp->cr_princhash.data); 7406 kfree(crp); 7407 nn->reclaim_str_hashtbl_size--; 7408 } 7409 7410 void 7411 nfs4_release_reclaim(struct nfsd_net *nn) 7412 { 7413 struct nfs4_client_reclaim *crp = NULL; 7414 int i; 7415 7416 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7417 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 7418 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 7419 struct nfs4_client_reclaim, cr_strhash); 7420 nfs4_remove_reclaim_record(crp, nn); 7421 } 7422 } 7423 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 7424 } 7425 7426 /* 7427 * called from OPEN, CLAIM_PREVIOUS with a new clientid. 
*/ 7428 struct nfs4_client_reclaim * 7429 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) 7430 { 7431 unsigned int strhashval; 7432 struct nfs4_client_reclaim *crp = NULL; 7433 7434 strhashval = clientstr_hashval(name); 7435 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 7436 if (compare_blob(&crp->cr_name, &name) == 0) { 7437 return crp; 7438 } 7439 } 7440 return NULL; 7441 } 7442 7443 __be32 7444 nfs4_check_open_reclaim(struct nfs4_client *clp) 7445 { 7446 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 7447 return nfserr_no_grace; 7448 7449 if (nfsd4_client_record_check(clp)) 7450 return nfserr_reclaim_bad; 7451 7452 return nfs_ok; 7453 } 7454 7455 /* 7456 * Since the lifetime of a delegation isn't limited to that of an open, a 7457 * client may quite reasonably hang on to a delegation as long as it has 7458 * the inode cached. This becomes an obvious problem the first time a 7459 * client's inode cache approaches the size of the server's total memory. 7460 * 7461 * For now we avoid this problem by imposing a hard limit on the number 7462 * of delegations, which varies according to the server's memory size. 7463 */ 7464 static void 7465 set_max_delegations(void) 7466 { 7467 /* 7468 * Allow at most 4 delegations per megabyte of RAM. Quick 7469 * estimates suggest that in the worst case (where every delegation 7470 * is for a different inode), a delegation could take about 1.5K, 7471 * giving a worst case usage of about 6% of memory. 7472 */ 7473 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 7474 } 7475 7476 static int nfs4_state_create_net(struct net *net) 7477 { 7478 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7479 int i; 7480 7481 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 7482 sizeof(struct list_head), 7483 GFP_KERNEL); 7484 if (!nn->conf_id_hashtbl) 7485 goto err; 7486 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 7487 sizeof(struct list_head), 7488 GFP_KERNEL); 7489 if (!nn->unconf_id_hashtbl) 7490 goto err_unconf_id; 7491 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, 7492 sizeof(struct list_head), 7493 GFP_KERNEL); 7494 if (!nn->sessionid_hashtbl) 7495 goto err_sessionid; 7496 7497 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7498 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 7499 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 7500 } 7501 for (i = 0; i < SESSION_HASH_SIZE; i++) 7502 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 7503 nn->conf_name_tree = RB_ROOT; 7504 nn->unconf_name_tree = RB_ROOT; 7505 nn->boot_time = ktime_get_real_seconds(); 7506 nn->grace_ended = false; 7507 nn->nfsd4_manager.block_opens = true; 7508 INIT_LIST_HEAD(&nn->nfsd4_manager.list); 7509 INIT_LIST_HEAD(&nn->client_lru); 7510 INIT_LIST_HEAD(&nn->close_lru); 7511 INIT_LIST_HEAD(&nn->del_recall_lru); 7512 spin_lock_init(&nn->client_lock); 7513 spin_lock_init(&nn->s2s_cp_lock); 7514 idr_init(&nn->s2s_cp_stateids); 7515 7516 spin_lock_init(&nn->blocked_locks_lock); 7517 INIT_LIST_HEAD(&nn->blocked_locks_lru); 7518 7519 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 7520 get_net(net); 7521 7522 return 0; 7523 7524 err_sessionid: 7525 kfree(nn->unconf_id_hashtbl); 7526 err_unconf_id: 7527 kfree(nn->conf_id_hashtbl); 7528 err: 7529 return -ENOMEM; 7530 } 7531 7532 static void 7533 nfs4_state_destroy_net(struct net *net) 7534 { 7535 int i; 7536 struct nfs4_client *clp = NULL; 7537 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7538 7539 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7540 
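/* First pass: destroy every confirmed client still hashed; the
 * unconfirmed clients are handled by the loop further down. */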
while (!list_empty(&nn->conf_id_hashtbl[i])) { 7541 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 7542 destroy_client(clp); 7543 } 7544 } 7545 7546 WARN_ON(!list_empty(&nn->blocked_locks_lru)); 7547 7548 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7549 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 7550 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 7551 destroy_client(clp); 7552 } 7553 } 7554 7555 kfree(nn->sessionid_hashtbl); 7556 kfree(nn->unconf_id_hashtbl); 7557 kfree(nn->conf_id_hashtbl); 7558 put_net(net); 7559 } 7560 7561 int 7562 nfs4_state_start_net(struct net *net) 7563 { 7564 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7565 int ret; 7566 7567 ret = nfs4_state_create_net(net); 7568 if (ret) 7569 return ret; 7570 locks_start_grace(net, &nn->nfsd4_manager); 7571 nfsd4_client_tracking_init(net); 7572 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) 7573 goto skip_grace; 7574 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", 7575 nn->nfsd4_grace, net->ns.inum); 7576 trace_nfsd_grace_start(nn); 7577 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 7578 return 0; 7579 7580 skip_grace: 7581 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n", 7582 net->ns.inum); 7583 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); 7584 nfsd4_end_grace(nn); 7585 return 0; 7586 } 7587 7588 /* initialization to perform when the nfsd service is started: */ 7589 7590 int 7591 nfs4_state_start(void) 7592 { 7593 int ret; 7594 7595 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); 7596 if (laundry_wq == NULL) { 7597 ret = -ENOMEM; 7598 goto out; 7599 } 7600 ret = nfsd4_create_callback_queue(); 7601 if (ret) 7602 goto out_free_laundry; 7603 7604 set_max_delegations(); 7605 return 0; 7606 7607 out_free_laundry: 7608 destroy_workqueue(laundry_wq); 7609 out: 7610 return ret; 7611 } 7612 7613 void 7614 nfs4_state_shutdown_net(struct net *net) 7615 { 7616 struct nfs4_delegation *dp = NULL; 7617 struct list_head *pos, *next, reaplist; 7618 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7619 7620 cancel_delayed_work_sync(&nn->laundromat_work); 7621 locks_end_grace(&nn->nfsd4_manager); 7622 7623 INIT_LIST_HEAD(&reaplist); 7624 spin_lock(&state_lock); 7625 list_for_each_safe(pos, next, &nn->del_recall_lru) { 7626 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 7627 WARN_ON(!unhash_delegation_locked(dp)); 7628 list_add(&dp->dl_recall_lru, &reaplist); 7629 } 7630 spin_unlock(&state_lock); 7631 list_for_each_safe(pos, next, &reaplist) { 7632 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 7633 list_del_init(&dp->dl_recall_lru); 7634 destroy_unhashed_deleg(dp); 7635 } 7636 7637 nfsd4_client_tracking_exit(net); 7638 nfs4_state_destroy_net(net); 7639 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 7640 nfsd4_ssc_shutdown_umount(nn); 7641 #endif 7642 } 7643 7644 void 7645 nfs4_state_shutdown(void) 7646 { 7647 destroy_workqueue(laundry_wq); 7648 nfsd4_destroy_callback_queue(); 7649 } 7650 7651 static void 7652 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7653 { 7654 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && 7655 CURRENT_STATEID(stateid)) 7656 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 7657 } 7658 7659 static void 7660 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7661 { 7662 if (cstate->minorversion) { 7663 
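/* Cache this op's result stateid as the compound's current stateid,
 * so get_stateid() can substitute it when a later op in the compound
 * passes the special current-stateid value. */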
memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 7664 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 7665 } 7666 } 7667 7668 void 7669 clear_current_stateid(struct nfsd4_compound_state *cstate) 7670 { 7671 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 7672 } 7673 7674 /* 7675 * functions to set current state id 7676 */ 7677 void 7678 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, 7679 union nfsd4_op_u *u) 7680 { 7681 put_stateid(cstate, &u->open_downgrade.od_stateid); 7682 } 7683 7684 void 7685 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, 7686 union nfsd4_op_u *u) 7687 { 7688 put_stateid(cstate, &u->open.op_stateid); 7689 } 7690 7691 void 7692 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, 7693 union nfsd4_op_u *u) 7694 { 7695 put_stateid(cstate, &u->close.cl_stateid); 7696 } 7697 7698 void 7699 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, 7700 union nfsd4_op_u *u) 7701 { 7702 put_stateid(cstate, &u->lock.lk_resp_stateid); 7703 } 7704 7705 /* 7706 * functions to consume current state id 7707 */ 7708 7709 void 7710 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, 7711 union nfsd4_op_u *u) 7712 { 7713 get_stateid(cstate, &u->open_downgrade.od_stateid); 7714 } 7715 7716 void 7717 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, 7718 union nfsd4_op_u *u) 7719 { 7720 get_stateid(cstate, &u->delegreturn.dr_stateid); 7721 } 7722 7723 void 7724 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, 7725 union nfsd4_op_u *u) 7726 { 7727 get_stateid(cstate, &u->free_stateid.fr_stateid); 7728 } 7729 7730 void 7731 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, 7732 union nfsd4_op_u *u) 7733 { 7734 get_stateid(cstate, &u->setattr.sa_stateid); 7735 } 7736 7737 void 7738 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, 7739 union nfsd4_op_u *u) 7740 { 7741 get_stateid(cstate, &u->close.cl_stateid); 7742 } 7743 7744 void 7745 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, 7746 union nfsd4_op_u *u) 7747 { 7748 get_stateid(cstate, &u->locku.lu_stateid); 7749 } 7750 7751 void 7752 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, 7753 union nfsd4_op_u *u) 7754 { 7755 get_stateid(cstate, &u->read.rd_stateid); 7756 } 7757 7758 void 7759 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, 7760 union nfsd4_op_u *u) 7761 { 7762 get_stateid(cstate, &u->write.wr_stateid); 7763 } 7764