/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY        NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
        .si_generation = ~0,
        .si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
        /* all fields zero */
};
static const stateid_t currentstateid = {
        .si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table. In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
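 * nfs4_put_stid() and put_ol_stateid_locked() wake this queue whenever they
 * drop an sc_count reference that does not take the count to zero.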
89 */ 90 static DECLARE_WAIT_QUEUE_HEAD(close_wq); 91 92 static struct kmem_cache *openowner_slab; 93 static struct kmem_cache *lockowner_slab; 94 static struct kmem_cache *file_slab; 95 static struct kmem_cache *stateid_slab; 96 static struct kmem_cache *deleg_slab; 97 static struct kmem_cache *odstate_slab; 98 99 static void free_session(struct nfsd4_session *); 100 101 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops; 102 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops; 103 104 static bool is_session_dead(struct nfsd4_session *ses) 105 { 106 return ses->se_flags & NFS4_SESSION_DEAD; 107 } 108 109 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) 110 { 111 if (atomic_read(&ses->se_ref) > ref_held_by_me) 112 return nfserr_jukebox; 113 ses->se_flags |= NFS4_SESSION_DEAD; 114 return nfs_ok; 115 } 116 117 static bool is_client_expired(struct nfs4_client *clp) 118 { 119 return clp->cl_time == 0; 120 } 121 122 static __be32 get_client_locked(struct nfs4_client *clp) 123 { 124 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 125 126 lockdep_assert_held(&nn->client_lock); 127 128 if (is_client_expired(clp)) 129 return nfserr_expired; 130 atomic_inc(&clp->cl_refcount); 131 return nfs_ok; 132 } 133 134 /* must be called under the client_lock */ 135 static inline void 136 renew_client_locked(struct nfs4_client *clp) 137 { 138 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 139 140 if (is_client_expired(clp)) { 141 WARN_ON(1); 142 printk("%s: client (clientid %08x/%08x) already expired\n", 143 __func__, 144 clp->cl_clientid.cl_boot, 145 clp->cl_clientid.cl_id); 146 return; 147 } 148 149 dprintk("renewing client (clientid %08x/%08x)\n", 150 clp->cl_clientid.cl_boot, 151 clp->cl_clientid.cl_id); 152 list_move_tail(&clp->cl_lru, &nn->client_lru); 153 clp->cl_time = get_seconds(); 154 } 155 156 static void put_client_renew_locked(struct nfs4_client *clp) 157 { 158 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 159 160 lockdep_assert_held(&nn->client_lock); 161 162 if (!atomic_dec_and_test(&clp->cl_refcount)) 163 return; 164 if (!is_client_expired(clp)) 165 renew_client_locked(clp); 166 } 167 168 static void put_client_renew(struct nfs4_client *clp) 169 { 170 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 171 172 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock)) 173 return; 174 if (!is_client_expired(clp)) 175 renew_client_locked(clp); 176 spin_unlock(&nn->client_lock); 177 } 178 179 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) 180 { 181 __be32 status; 182 183 if (is_session_dead(ses)) 184 return nfserr_badsession; 185 status = get_client_locked(ses->se_client); 186 if (status) 187 return status; 188 atomic_inc(&ses->se_ref); 189 return nfs_ok; 190 } 191 192 static void nfsd4_put_session_locked(struct nfsd4_session *ses) 193 { 194 struct nfs4_client *clp = ses->se_client; 195 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 196 197 lockdep_assert_held(&nn->client_lock); 198 199 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) 200 free_session(ses); 201 put_client_renew_locked(clp); 202 } 203 204 static void nfsd4_put_session(struct nfsd4_session *ses) 205 { 206 struct nfs4_client *clp = ses->se_client; 207 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 208 209 spin_lock(&nn->client_lock); 210 nfsd4_put_session_locked(ses); 211 spin_unlock(&nn->client_lock); 212 } 213 214 static struct nfsd4_blocked_lock * 215 find_blocked_lock(struct 
nfs4_lockowner *lo, struct knfsd_fh *fh, 216 struct nfsd_net *nn) 217 { 218 struct nfsd4_blocked_lock *cur, *found = NULL; 219 220 spin_lock(&nn->blocked_locks_lock); 221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 222 if (fh_match(fh, &cur->nbl_fh)) { 223 list_del_init(&cur->nbl_list); 224 list_del_init(&cur->nbl_lru); 225 found = cur; 226 break; 227 } 228 } 229 spin_unlock(&nn->blocked_locks_lock); 230 if (found) 231 posix_unblock_lock(&found->nbl_lock); 232 return found; 233 } 234 235 static struct nfsd4_blocked_lock * 236 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, 237 struct nfsd_net *nn) 238 { 239 struct nfsd4_blocked_lock *nbl; 240 241 nbl = find_blocked_lock(lo, fh, nn); 242 if (!nbl) { 243 nbl= kmalloc(sizeof(*nbl), GFP_KERNEL); 244 if (nbl) { 245 fh_copy_shallow(&nbl->nbl_fh, fh); 246 locks_init_lock(&nbl->nbl_lock); 247 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, 248 &nfsd4_cb_notify_lock_ops, 249 NFSPROC4_CLNT_CB_NOTIFY_LOCK); 250 } 251 } 252 return nbl; 253 } 254 255 static void 256 free_blocked_lock(struct nfsd4_blocked_lock *nbl) 257 { 258 locks_release_private(&nbl->nbl_lock); 259 kfree(nbl); 260 } 261 262 static int 263 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) 264 { 265 /* 266 * Since this is just an optimization, we don't try very hard if it 267 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and 268 * just quit trying on anything else. 269 */ 270 switch (task->tk_status) { 271 case -NFS4ERR_DELAY: 272 rpc_delay(task, 1 * HZ); 273 return 0; 274 default: 275 return 1; 276 } 277 } 278 279 static void 280 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb) 281 { 282 struct nfsd4_blocked_lock *nbl = container_of(cb, 283 struct nfsd4_blocked_lock, nbl_cb); 284 285 free_blocked_lock(nbl); 286 } 287 288 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { 289 .done = nfsd4_cb_notify_lock_done, 290 .release = nfsd4_cb_notify_lock_release, 291 }; 292 293 static inline struct nfs4_stateowner * 294 nfs4_get_stateowner(struct nfs4_stateowner *sop) 295 { 296 atomic_inc(&sop->so_count); 297 return sop; 298 } 299 300 static int 301 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) 302 { 303 return (sop->so_owner.len == owner->len) && 304 0 == memcmp(sop->so_owner.data, owner->data, owner->len); 305 } 306 307 static struct nfs4_openowner * 308 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open, 309 struct nfs4_client *clp) 310 { 311 struct nfs4_stateowner *so; 312 313 lockdep_assert_held(&clp->cl_lock); 314 315 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], 316 so_strhash) { 317 if (!so->so_is_open_owner) 318 continue; 319 if (same_owner_str(so, &open->op_owner)) 320 return openowner(nfs4_get_stateowner(so)); 321 } 322 return NULL; 323 } 324 325 static struct nfs4_openowner * 326 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, 327 struct nfs4_client *clp) 328 { 329 struct nfs4_openowner *oo; 330 331 spin_lock(&clp->cl_lock); 332 oo = find_openstateowner_str_locked(hashval, open, clp); 333 spin_unlock(&clp->cl_lock); 334 return oo; 335 } 336 337 static inline u32 338 opaque_hashval(const void *ptr, int nbytes) 339 { 340 unsigned char *cptr = (unsigned char *) ptr; 341 342 u32 x = 0; 343 while (nbytes--) { 344 x *= 37; 345 x += *cptr++; 346 } 347 return x; 348 } 349 350 static void nfsd4_free_file_rcu(struct rcu_head *rcu) 351 { 352 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, 
fi_rcu); 353 354 kmem_cache_free(file_slab, fp); 355 } 356 357 void 358 put_nfs4_file(struct nfs4_file *fi) 359 { 360 might_lock(&state_lock); 361 362 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { 363 hlist_del_rcu(&fi->fi_hash); 364 spin_unlock(&state_lock); 365 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); 366 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); 367 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); 368 } 369 } 370 371 static struct file * 372 __nfs4_get_fd(struct nfs4_file *f, int oflag) 373 { 374 if (f->fi_fds[oflag]) 375 return get_file(f->fi_fds[oflag]); 376 return NULL; 377 } 378 379 static struct file * 380 find_writeable_file_locked(struct nfs4_file *f) 381 { 382 struct file *ret; 383 384 lockdep_assert_held(&f->fi_lock); 385 386 ret = __nfs4_get_fd(f, O_WRONLY); 387 if (!ret) 388 ret = __nfs4_get_fd(f, O_RDWR); 389 return ret; 390 } 391 392 static struct file * 393 find_writeable_file(struct nfs4_file *f) 394 { 395 struct file *ret; 396 397 spin_lock(&f->fi_lock); 398 ret = find_writeable_file_locked(f); 399 spin_unlock(&f->fi_lock); 400 401 return ret; 402 } 403 404 static struct file *find_readable_file_locked(struct nfs4_file *f) 405 { 406 struct file *ret; 407 408 lockdep_assert_held(&f->fi_lock); 409 410 ret = __nfs4_get_fd(f, O_RDONLY); 411 if (!ret) 412 ret = __nfs4_get_fd(f, O_RDWR); 413 return ret; 414 } 415 416 static struct file * 417 find_readable_file(struct nfs4_file *f) 418 { 419 struct file *ret; 420 421 spin_lock(&f->fi_lock); 422 ret = find_readable_file_locked(f); 423 spin_unlock(&f->fi_lock); 424 425 return ret; 426 } 427 428 struct file * 429 find_any_file(struct nfs4_file *f) 430 { 431 struct file *ret; 432 433 spin_lock(&f->fi_lock); 434 ret = __nfs4_get_fd(f, O_RDWR); 435 if (!ret) { 436 ret = __nfs4_get_fd(f, O_WRONLY); 437 if (!ret) 438 ret = __nfs4_get_fd(f, O_RDONLY); 439 } 440 spin_unlock(&f->fi_lock); 441 return ret; 442 } 443 444 static atomic_long_t num_delegations; 445 unsigned long max_delegations; 446 447 /* 448 * Open owner state (share locks) 449 */ 450 451 /* hash tables for lock and open owners */ 452 #define OWNER_HASH_BITS 8 453 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 454 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 455 456 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) 457 { 458 unsigned int ret; 459 460 ret = opaque_hashval(ownername->data, ownername->len); 461 return ret & OWNER_HASH_MASK; 462 } 463 464 /* hash table for nfs4_file */ 465 #define FILE_HASH_BITS 8 466 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) 467 468 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh) 469 { 470 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0); 471 } 472 473 static unsigned int file_hashval(struct knfsd_fh *fh) 474 { 475 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1); 476 } 477 478 static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; 479 480 static void 481 __nfs4_file_get_access(struct nfs4_file *fp, u32 access) 482 { 483 lockdep_assert_held(&fp->fi_lock); 484 485 if (access & NFS4_SHARE_ACCESS_WRITE) 486 atomic_inc(&fp->fi_access[O_WRONLY]); 487 if (access & NFS4_SHARE_ACCESS_READ) 488 atomic_inc(&fp->fi_access[O_RDONLY]); 489 } 490 491 static __be32 492 nfs4_file_get_access(struct nfs4_file *fp, u32 access) 493 { 494 lockdep_assert_held(&fp->fi_lock); 495 496 /* Does this access mode make sense? */ 497 if (access & ~NFS4_SHARE_ACCESS_BOTH) 498 return nfserr_inval; 499 500 /* Does it conflict with a deny mode already set? 
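         * fi_share_deny accumulates the deny bits of this file's open
         * stateids, so e.g. an existing open with deny=WRITE makes a new
         * request for WRITE access fail here.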
*/ 501 if ((access & fp->fi_share_deny) != 0) 502 return nfserr_share_denied; 503 504 __nfs4_file_get_access(fp, access); 505 return nfs_ok; 506 } 507 508 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) 509 { 510 /* Common case is that there is no deny mode. */ 511 if (deny) { 512 /* Does this deny mode make sense? */ 513 if (deny & ~NFS4_SHARE_DENY_BOTH) 514 return nfserr_inval; 515 516 if ((deny & NFS4_SHARE_DENY_READ) && 517 atomic_read(&fp->fi_access[O_RDONLY])) 518 return nfserr_share_denied; 519 520 if ((deny & NFS4_SHARE_DENY_WRITE) && 521 atomic_read(&fp->fi_access[O_WRONLY])) 522 return nfserr_share_denied; 523 } 524 return nfs_ok; 525 } 526 527 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 528 { 529 might_lock(&fp->fi_lock); 530 531 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { 532 struct file *f1 = NULL; 533 struct file *f2 = NULL; 534 535 swap(f1, fp->fi_fds[oflag]); 536 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) 537 swap(f2, fp->fi_fds[O_RDWR]); 538 spin_unlock(&fp->fi_lock); 539 if (f1) 540 fput(f1); 541 if (f2) 542 fput(f2); 543 } 544 } 545 546 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) 547 { 548 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); 549 550 if (access & NFS4_SHARE_ACCESS_WRITE) 551 __nfs4_file_put_access(fp, O_WRONLY); 552 if (access & NFS4_SHARE_ACCESS_READ) 553 __nfs4_file_put_access(fp, O_RDONLY); 554 } 555 556 /* 557 * Allocate a new open/delegation state counter. This is needed for 558 * pNFS for proper return on close semantics. 559 * 560 * Note that we only allocate it for pNFS-enabled exports, otherwise 561 * all pointers to struct nfs4_clnt_odstate are always NULL. 562 */ 563 static struct nfs4_clnt_odstate * 564 alloc_clnt_odstate(struct nfs4_client *clp) 565 { 566 struct nfs4_clnt_odstate *co; 567 568 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); 569 if (co) { 570 co->co_client = clp; 571 atomic_set(&co->co_odcount, 1); 572 } 573 return co; 574 } 575 576 static void 577 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) 578 { 579 struct nfs4_file *fp = co->co_file; 580 581 lockdep_assert_held(&fp->fi_lock); 582 list_add(&co->co_perfile, &fp->fi_clnt_odstate); 583 } 584 585 static inline void 586 get_clnt_odstate(struct nfs4_clnt_odstate *co) 587 { 588 if (co) 589 atomic_inc(&co->co_odcount); 590 } 591 592 static void 593 put_clnt_odstate(struct nfs4_clnt_odstate *co) 594 { 595 struct nfs4_file *fp; 596 597 if (!co) 598 return; 599 600 fp = co->co_file; 601 if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { 602 list_del(&co->co_perfile); 603 spin_unlock(&fp->fi_lock); 604 605 nfsd4_return_all_file_layouts(co->co_client, fp); 606 kmem_cache_free(odstate_slab, co); 607 } 608 } 609 610 static struct nfs4_clnt_odstate * 611 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) 612 { 613 struct nfs4_clnt_odstate *co; 614 struct nfs4_client *cl; 615 616 if (!new) 617 return NULL; 618 619 cl = new->co_client; 620 621 spin_lock(&fp->fi_lock); 622 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { 623 if (co->co_client == cl) { 624 get_clnt_odstate(co); 625 goto out; 626 } 627 } 628 co = new; 629 co->co_file = fp; 630 hash_clnt_odstate_locked(new); 631 out: 632 spin_unlock(&fp->fi_lock); 633 return co; 634 } 635 636 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, 637 void (*sc_free)(struct nfs4_stid *)) 638 { 639 struct nfs4_stid *stid; 640 int new_id; 641 642 stid = kmem_cache_zalloc(slab, 
GFP_KERNEL); 643 if (!stid) 644 return NULL; 645 646 idr_preload(GFP_KERNEL); 647 spin_lock(&cl->cl_lock); 648 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT); 649 spin_unlock(&cl->cl_lock); 650 idr_preload_end(); 651 if (new_id < 0) 652 goto out_free; 653 654 stid->sc_free = sc_free; 655 stid->sc_client = cl; 656 stid->sc_stateid.si_opaque.so_id = new_id; 657 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; 658 /* Will be incremented before return to client: */ 659 atomic_set(&stid->sc_count, 1); 660 spin_lock_init(&stid->sc_lock); 661 662 /* 663 * It shouldn't be a problem to reuse an opaque stateid value. 664 * I don't think it is for 4.1. But with 4.0 I worry that, for 665 * example, a stray write retransmission could be accepted by 666 * the server when it should have been rejected. Therefore, 667 * adopt a trick from the sctp code to attempt to maximize the 668 * amount of time until an id is reused, by ensuring they always 669 * "increase" (mod INT_MAX): 670 */ 671 return stid; 672 out_free: 673 kmem_cache_free(slab, stid); 674 return NULL; 675 } 676 677 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) 678 { 679 struct nfs4_stid *stid; 680 681 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid); 682 if (!stid) 683 return NULL; 684 685 return openlockstateid(stid); 686 } 687 688 static void nfs4_free_deleg(struct nfs4_stid *stid) 689 { 690 kmem_cache_free(deleg_slab, stid); 691 atomic_long_dec(&num_delegations); 692 } 693 694 /* 695 * When we recall a delegation, we should be careful not to hand it 696 * out again straight away. 697 * To ensure this we keep a pair of bloom filters ('new' and 'old') 698 * in which the filehandles of recalled delegations are "stored". 699 * If a filehandle appear in either filter, a delegation is blocked. 700 * When a delegation is recalled, the filehandle is stored in the "new" 701 * filter. 702 * Every 30 seconds we swap the filters and clear the "new" one, 703 * unless both are empty of course. 704 * 705 * Each filter is 256 bits. We hash the filehandle to 32bit and use the 706 * low 3 bytes as hash-table indices. 707 * 708 * 'blocked_delegations_lock', which is always taken in block_delegations(), 709 * is used to manage concurrent access. Testing does not need the lock 710 * except when swapping the two filters. 
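 *
 * For example, a filehandle whose 32-bit hash is 0x00a1b2c3 sets (or tests)
 * bits 0xc3, 0xb2 and 0xa1 of a filter, one bit per byte of the hash.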
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
        int     entries, old_entries;
        time_t  swap_time;
        int     new; /* index into 'set' */
        DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        if (bd->entries == 0)
                return 0;
        if (seconds_since_boot() - bd->swap_time > 30) {
                spin_lock(&blocked_delegations_lock);
                if (seconds_since_boot() - bd->swap_time > 30) {
                        bd->entries -= bd->old_entries;
                        bd->old_entries = bd->entries;
                        memset(bd->set[bd->new], 0,
                               sizeof(bd->set[0]));
                        bd->new = 1-bd->new;
                        bd->swap_time = seconds_since_boot();
                }
                spin_unlock(&blocked_delegations_lock);
        }
        hash = jhash(&fh->fh_base, fh->fh_size, 0);
        if (test_bit(hash&255, bd->set[0]) &&
            test_bit((hash>>8)&255, bd->set[0]) &&
            test_bit((hash>>16)&255, bd->set[0]))
                return 1;

        if (test_bit(hash&255, bd->set[1]) &&
            test_bit((hash>>8)&255, bd->set[1]) &&
            test_bit((hash>>16)&255, bd->set[1]))
                return 1;

        return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        hash = jhash(&fh->fh_base, fh->fh_size, 0);

        spin_lock(&blocked_delegations_lock);
        __set_bit(hash&255, bd->set[bd->new]);
        __set_bit((hash>>8)&255, bd->set[bd->new]);
        __set_bit((hash>>16)&255, bd->set[bd->new]);
        if (bd->entries == 0)
                bd->swap_time = seconds_since_boot();
        bd->entries += 1;
        spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
                 struct nfs4_clnt_odstate *odstate)
{
        struct nfs4_delegation *dp;
        long n;

        dprintk("NFSD alloc_init_deleg\n");
        n = atomic_long_inc_return(&num_delegations);
        if (n < 0 || n > max_delegations)
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;

        /*
         * delegation seqid's are never incremented.
         * The 4.1 special meaning of seqid 0 isn't meaningful, really, but
         * let's avoid 0 anyway just for consistency and use 1:
         */
        dp->dl_stid.sc_stateid.si_generation = 1;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_clnt_odstate = odstate;
        get_clnt_odstate(odstate);
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
        dp->dl_retries = 1;
        nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
                      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
        return dp;
out_dec:
        atomic_long_dec(&num_delegations);
        return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
        struct nfs4_file *fp = s->sc_file;
        struct nfs4_client *clp = s->sc_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        spin_unlock(&clp->cl_lock);
        s->sc_free(s);
        if (fp)
                put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
        stateid_t *src = &stid->sc_stateid;

        spin_lock(&stid->sc_lock);
        if (unlikely(++src->si_generation == 0))
                src->si_generation = 1;
        memcpy(dst, src, sizeof(*dst));
        spin_unlock(&stid->sc_lock);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
        struct file *filp = NULL;

        spin_lock(&fp->fi_lock);
        if (fp->fi_deleg_file && --fp->fi_delegees == 0)
                swap(filp, fp->fi_deleg_file);
        spin_unlock(&fp->fi_lock);

        if (filp) {
                vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
                fput(filp);
        }
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
        s->sc_type = 0;
}

/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp: a pointer to the nfs4_client we're granting a delegation to
 * @fp: a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                for this nfs4_file.
 *
 */

static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
        struct nfs4_delegation *searchdp = NULL;
        struct nfs4_client *searchclp = NULL;

        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
                searchclp = searchdp->dl_stid.sc_client;
                if (clp == searchclp) {
                        return -EAGAIN;
                }
        }
        return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp: a pointer to the nfs4_delegation we are adding.
 * @fp: a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
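 *
 * The caller must hold both the state_lock and fp->fi_lock.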
900 * 901 */ 902 903 static int 904 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) 905 { 906 int status; 907 struct nfs4_client *clp = dp->dl_stid.sc_client; 908 909 lockdep_assert_held(&state_lock); 910 lockdep_assert_held(&fp->fi_lock); 911 912 status = nfs4_get_existing_delegation(clp, fp); 913 if (status) 914 return status; 915 ++fp->fi_delegees; 916 atomic_inc(&dp->dl_stid.sc_count); 917 dp->dl_stid.sc_type = NFS4_DELEG_STID; 918 list_add(&dp->dl_perfile, &fp->fi_delegations); 919 list_add(&dp->dl_perclnt, &clp->cl_delegations); 920 return 0; 921 } 922 923 static bool 924 unhash_delegation_locked(struct nfs4_delegation *dp) 925 { 926 struct nfs4_file *fp = dp->dl_stid.sc_file; 927 928 lockdep_assert_held(&state_lock); 929 930 if (list_empty(&dp->dl_perfile)) 931 return false; 932 933 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; 934 /* Ensure that deleg break won't try to requeue it */ 935 ++dp->dl_time; 936 spin_lock(&fp->fi_lock); 937 list_del_init(&dp->dl_perclnt); 938 list_del_init(&dp->dl_recall_lru); 939 list_del_init(&dp->dl_perfile); 940 spin_unlock(&fp->fi_lock); 941 return true; 942 } 943 944 static void destroy_delegation(struct nfs4_delegation *dp) 945 { 946 bool unhashed; 947 948 spin_lock(&state_lock); 949 unhashed = unhash_delegation_locked(dp); 950 spin_unlock(&state_lock); 951 if (unhashed) { 952 put_clnt_odstate(dp->dl_clnt_odstate); 953 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 954 nfs4_put_stid(&dp->dl_stid); 955 } 956 } 957 958 static void revoke_delegation(struct nfs4_delegation *dp) 959 { 960 struct nfs4_client *clp = dp->dl_stid.sc_client; 961 962 WARN_ON(!list_empty(&dp->dl_recall_lru)); 963 964 put_clnt_odstate(dp->dl_clnt_odstate); 965 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 966 967 if (clp->cl_minorversion == 0) 968 nfs4_put_stid(&dp->dl_stid); 969 else { 970 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; 971 spin_lock(&clp->cl_lock); 972 list_add(&dp->dl_recall_lru, &clp->cl_revoked); 973 spin_unlock(&clp->cl_lock); 974 } 975 } 976 977 /* 978 * SETCLIENTID state 979 */ 980 981 static unsigned int clientid_hashval(u32 id) 982 { 983 return id & CLIENT_HASH_MASK; 984 } 985 986 static unsigned int clientstr_hashval(const char *name) 987 { 988 return opaque_hashval(name, 8) & CLIENT_HASH_MASK; 989 } 990 991 /* 992 * We store the NONE, READ, WRITE, and BOTH bits separately in the 993 * st_{access,deny}_bmap field of the stateid, in order to track not 994 * only what share bits are currently in force, but also what 995 * combinations of share bits previous opens have used. This allows us 996 * to enforce the recommendation of rfc 3530 14.2.19 that the server 997 * return an error if the client attempt to downgrade to a combination 998 * of share bits not explicable by closing some of its previous opens. 999 * 1000 * XXX: This enforcement is actually incomplete, since we don't keep 1001 * track of access/deny bit combinations; so, e.g., we allow: 1002 * 1003 * OPEN allow read, deny write 1004 * OPEN allow both, deny none 1005 * DOWNGRADE allow read, deny none 1006 * 1007 * which we should reject. 
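 *
 * As an illustration of the encoding: an OPEN for read sets bit
 * NFS4_SHARE_ACCESS_READ (1) in st_access_bmap, a later open for both sets
 * bit NFS4_SHARE_ACCESS_BOTH (3) as well, and bmap_to_share_mode() ORs the
 * set bit numbers back together, yielding 3 (both).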
1008 */ 1009 static unsigned int 1010 bmap_to_share_mode(unsigned long bmap) { 1011 int i; 1012 unsigned int access = 0; 1013 1014 for (i = 1; i < 4; i++) { 1015 if (test_bit(i, &bmap)) 1016 access |= i; 1017 } 1018 return access; 1019 } 1020 1021 /* set share access for a given stateid */ 1022 static inline void 1023 set_access(u32 access, struct nfs4_ol_stateid *stp) 1024 { 1025 unsigned char mask = 1 << access; 1026 1027 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 1028 stp->st_access_bmap |= mask; 1029 } 1030 1031 /* clear share access for a given stateid */ 1032 static inline void 1033 clear_access(u32 access, struct nfs4_ol_stateid *stp) 1034 { 1035 unsigned char mask = 1 << access; 1036 1037 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 1038 stp->st_access_bmap &= ~mask; 1039 } 1040 1041 /* test whether a given stateid has access */ 1042 static inline bool 1043 test_access(u32 access, struct nfs4_ol_stateid *stp) 1044 { 1045 unsigned char mask = 1 << access; 1046 1047 return (bool)(stp->st_access_bmap & mask); 1048 } 1049 1050 /* set share deny for a given stateid */ 1051 static inline void 1052 set_deny(u32 deny, struct nfs4_ol_stateid *stp) 1053 { 1054 unsigned char mask = 1 << deny; 1055 1056 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 1057 stp->st_deny_bmap |= mask; 1058 } 1059 1060 /* clear share deny for a given stateid */ 1061 static inline void 1062 clear_deny(u32 deny, struct nfs4_ol_stateid *stp) 1063 { 1064 unsigned char mask = 1 << deny; 1065 1066 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 1067 stp->st_deny_bmap &= ~mask; 1068 } 1069 1070 /* test whether a given stateid is denying specific access */ 1071 static inline bool 1072 test_deny(u32 deny, struct nfs4_ol_stateid *stp) 1073 { 1074 unsigned char mask = 1 << deny; 1075 1076 return (bool)(stp->st_deny_bmap & mask); 1077 } 1078 1079 static int nfs4_access_to_omode(u32 access) 1080 { 1081 switch (access & NFS4_SHARE_ACCESS_BOTH) { 1082 case NFS4_SHARE_ACCESS_READ: 1083 return O_RDONLY; 1084 case NFS4_SHARE_ACCESS_WRITE: 1085 return O_WRONLY; 1086 case NFS4_SHARE_ACCESS_BOTH: 1087 return O_RDWR; 1088 } 1089 WARN_ON_ONCE(1); 1090 return O_RDONLY; 1091 } 1092 1093 /* 1094 * A stateid that had a deny mode associated with it is being released 1095 * or downgraded. Recalculate the deny mode on the file. 
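 * The recalculated fi_share_deny is the union of the deny bmaps (converted
 * by bmap_to_share_mode()) of every stateid still attached to the file.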
1096 */ 1097 static void 1098 recalculate_deny_mode(struct nfs4_file *fp) 1099 { 1100 struct nfs4_ol_stateid *stp; 1101 1102 spin_lock(&fp->fi_lock); 1103 fp->fi_share_deny = 0; 1104 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) 1105 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); 1106 spin_unlock(&fp->fi_lock); 1107 } 1108 1109 static void 1110 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) 1111 { 1112 int i; 1113 bool change = false; 1114 1115 for (i = 1; i < 4; i++) { 1116 if ((i & deny) != i) { 1117 change = true; 1118 clear_deny(i, stp); 1119 } 1120 } 1121 1122 /* Recalculate per-file deny mode if there was a change */ 1123 if (change) 1124 recalculate_deny_mode(stp->st_stid.sc_file); 1125 } 1126 1127 /* release all access and file references for a given stateid */ 1128 static void 1129 release_all_access(struct nfs4_ol_stateid *stp) 1130 { 1131 int i; 1132 struct nfs4_file *fp = stp->st_stid.sc_file; 1133 1134 if (fp && stp->st_deny_bmap != 0) 1135 recalculate_deny_mode(fp); 1136 1137 for (i = 1; i < 4; i++) { 1138 if (test_access(i, stp)) 1139 nfs4_file_put_access(stp->st_stid.sc_file, i); 1140 clear_access(i, stp); 1141 } 1142 } 1143 1144 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop) 1145 { 1146 kfree(sop->so_owner.data); 1147 sop->so_ops->so_free(sop); 1148 } 1149 1150 static void nfs4_put_stateowner(struct nfs4_stateowner *sop) 1151 { 1152 struct nfs4_client *clp = sop->so_client; 1153 1154 might_lock(&clp->cl_lock); 1155 1156 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) 1157 return; 1158 sop->so_ops->so_unhash(sop); 1159 spin_unlock(&clp->cl_lock); 1160 nfs4_free_stateowner(sop); 1161 } 1162 1163 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) 1164 { 1165 struct nfs4_file *fp = stp->st_stid.sc_file; 1166 1167 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); 1168 1169 if (list_empty(&stp->st_perfile)) 1170 return false; 1171 1172 spin_lock(&fp->fi_lock); 1173 list_del_init(&stp->st_perfile); 1174 spin_unlock(&fp->fi_lock); 1175 list_del(&stp->st_perstateowner); 1176 return true; 1177 } 1178 1179 static void nfs4_free_ol_stateid(struct nfs4_stid *stid) 1180 { 1181 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1182 1183 put_clnt_odstate(stp->st_clnt_odstate); 1184 release_all_access(stp); 1185 if (stp->st_stateowner) 1186 nfs4_put_stateowner(stp->st_stateowner); 1187 kmem_cache_free(stateid_slab, stid); 1188 } 1189 1190 static void nfs4_free_lock_stateid(struct nfs4_stid *stid) 1191 { 1192 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1193 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); 1194 struct file *file; 1195 1196 file = find_any_file(stp->st_stid.sc_file); 1197 if (file) 1198 filp_close(file, (fl_owner_t)lo); 1199 nfs4_free_ol_stateid(stid); 1200 } 1201 1202 /* 1203 * Put the persistent reference to an already unhashed generic stateid, while 1204 * holding the cl_lock. If it's the last reference, then put it onto the 1205 * reaplist for later destruction. 
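 * (The reaplist is then freed outside the lock by free_ol_stateid_reaplist(),
 * since the actual free may sleep.)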
1206 */ 1207 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, 1208 struct list_head *reaplist) 1209 { 1210 struct nfs4_stid *s = &stp->st_stid; 1211 struct nfs4_client *clp = s->sc_client; 1212 1213 lockdep_assert_held(&clp->cl_lock); 1214 1215 WARN_ON_ONCE(!list_empty(&stp->st_locks)); 1216 1217 if (!atomic_dec_and_test(&s->sc_count)) { 1218 wake_up_all(&close_wq); 1219 return; 1220 } 1221 1222 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1223 list_add(&stp->st_locks, reaplist); 1224 } 1225 1226 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1227 { 1228 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1229 1230 list_del_init(&stp->st_locks); 1231 nfs4_unhash_stid(&stp->st_stid); 1232 return unhash_ol_stateid(stp); 1233 } 1234 1235 static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1236 { 1237 struct nfs4_client *clp = stp->st_stid.sc_client; 1238 bool unhashed; 1239 1240 spin_lock(&clp->cl_lock); 1241 unhashed = unhash_lock_stateid(stp); 1242 spin_unlock(&clp->cl_lock); 1243 if (unhashed) 1244 nfs4_put_stid(&stp->st_stid); 1245 } 1246 1247 static void unhash_lockowner_locked(struct nfs4_lockowner *lo) 1248 { 1249 struct nfs4_client *clp = lo->lo_owner.so_client; 1250 1251 lockdep_assert_held(&clp->cl_lock); 1252 1253 list_del_init(&lo->lo_owner.so_strhash); 1254 } 1255 1256 /* 1257 * Free a list of generic stateids that were collected earlier after being 1258 * fully unhashed. 1259 */ 1260 static void 1261 free_ol_stateid_reaplist(struct list_head *reaplist) 1262 { 1263 struct nfs4_ol_stateid *stp; 1264 struct nfs4_file *fp; 1265 1266 might_sleep(); 1267 1268 while (!list_empty(reaplist)) { 1269 stp = list_first_entry(reaplist, struct nfs4_ol_stateid, 1270 st_locks); 1271 list_del(&stp->st_locks); 1272 fp = stp->st_stid.sc_file; 1273 stp->st_stid.sc_free(&stp->st_stid); 1274 if (fp) 1275 put_nfs4_file(fp); 1276 } 1277 } 1278 1279 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, 1280 struct list_head *reaplist) 1281 { 1282 struct nfs4_ol_stateid *stp; 1283 1284 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); 1285 1286 while (!list_empty(&open_stp->st_locks)) { 1287 stp = list_entry(open_stp->st_locks.next, 1288 struct nfs4_ol_stateid, st_locks); 1289 WARN_ON(!unhash_lock_stateid(stp)); 1290 put_ol_stateid_locked(stp, reaplist); 1291 } 1292 } 1293 1294 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, 1295 struct list_head *reaplist) 1296 { 1297 bool unhashed; 1298 1299 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1300 1301 unhashed = unhash_ol_stateid(stp); 1302 release_open_stateid_locks(stp, reaplist); 1303 return unhashed; 1304 } 1305 1306 static void release_open_stateid(struct nfs4_ol_stateid *stp) 1307 { 1308 LIST_HEAD(reaplist); 1309 1310 spin_lock(&stp->st_stid.sc_client->cl_lock); 1311 if (unhash_open_stateid(stp, &reaplist)) 1312 put_ol_stateid_locked(stp, &reaplist); 1313 spin_unlock(&stp->st_stid.sc_client->cl_lock); 1314 free_ol_stateid_reaplist(&reaplist); 1315 } 1316 1317 static void unhash_openowner_locked(struct nfs4_openowner *oo) 1318 { 1319 struct nfs4_client *clp = oo->oo_owner.so_client; 1320 1321 lockdep_assert_held(&clp->cl_lock); 1322 1323 list_del_init(&oo->oo_owner.so_strhash); 1324 list_del_init(&oo->oo_perclient); 1325 } 1326 1327 static void release_last_closed_stateid(struct nfs4_openowner *oo) 1328 { 1329 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, 1330 nfsd_net_id); 1331 struct nfs4_ol_stateid *s; 1332 1333 
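        /*
         * Detach oo_last_closed_stid under the client_lock; the reference
         * is put only after the lock has been dropped.
         */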
spin_lock(&nn->client_lock); 1334 s = oo->oo_last_closed_stid; 1335 if (s) { 1336 list_del_init(&oo->oo_close_lru); 1337 oo->oo_last_closed_stid = NULL; 1338 } 1339 spin_unlock(&nn->client_lock); 1340 if (s) 1341 nfs4_put_stid(&s->st_stid); 1342 } 1343 1344 static void release_openowner(struct nfs4_openowner *oo) 1345 { 1346 struct nfs4_ol_stateid *stp; 1347 struct nfs4_client *clp = oo->oo_owner.so_client; 1348 struct list_head reaplist; 1349 1350 INIT_LIST_HEAD(&reaplist); 1351 1352 spin_lock(&clp->cl_lock); 1353 unhash_openowner_locked(oo); 1354 while (!list_empty(&oo->oo_owner.so_stateids)) { 1355 stp = list_first_entry(&oo->oo_owner.so_stateids, 1356 struct nfs4_ol_stateid, st_perstateowner); 1357 if (unhash_open_stateid(stp, &reaplist)) 1358 put_ol_stateid_locked(stp, &reaplist); 1359 } 1360 spin_unlock(&clp->cl_lock); 1361 free_ol_stateid_reaplist(&reaplist); 1362 release_last_closed_stateid(oo); 1363 nfs4_put_stateowner(&oo->oo_owner); 1364 } 1365 1366 static inline int 1367 hash_sessionid(struct nfs4_sessionid *sessionid) 1368 { 1369 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; 1370 1371 return sid->sequence % SESSION_HASH_SIZE; 1372 } 1373 1374 #ifdef CONFIG_SUNRPC_DEBUG 1375 static inline void 1376 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1377 { 1378 u32 *ptr = (u32 *)(&sessionid->data[0]); 1379 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); 1380 } 1381 #else 1382 static inline void 1383 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1384 { 1385 } 1386 #endif 1387 1388 /* 1389 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it 1390 * won't be used for replay. 1391 */ 1392 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) 1393 { 1394 struct nfs4_stateowner *so = cstate->replay_owner; 1395 1396 if (nfserr == nfserr_replay_me) 1397 return; 1398 1399 if (!seqid_mutating_err(ntohl(nfserr))) { 1400 nfsd4_cstate_clear_replay(cstate); 1401 return; 1402 } 1403 if (!so) 1404 return; 1405 if (so->so_is_open_owner) 1406 release_last_closed_stateid(openowner(so)); 1407 so->so_seqid++; 1408 return; 1409 } 1410 1411 static void 1412 gen_sessionid(struct nfsd4_session *ses) 1413 { 1414 struct nfs4_client *clp = ses->se_client; 1415 struct nfsd4_sessionid *sid; 1416 1417 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; 1418 sid->clientid = clp->cl_clientid; 1419 sid->sequence = current_sessionid++; 1420 sid->reserved = 0; 1421 } 1422 1423 /* 1424 * The protocol defines ca_maxresponssize_cached to include the size of 1425 * the rpc header, but all we need to cache is the data starting after 1426 * the end of the initial SEQUENCE operation--the rest we regenerate 1427 * each time. Therefore we can advertise a ca_maxresponssize_cached 1428 * value that is the number of bytes in our cache plus a few additional 1429 * bytes. 
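 * (With AUTH_NULL, those additional bytes total 24 + 12 + 44 = 80, the
 * value of NFSD_MIN_HDR_SEQ_SZ defined below.)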
In order to stay on the safe side, and not promise more than 1430 * we can cache, those additional bytes must be the minimum possible: 24 1431 * bytes of rpc header (xid through accept state, with AUTH_NULL 1432 * verifier), 12 for the compound header (with zero-length tag), and 44 1433 * for the SEQUENCE op response: 1434 */ 1435 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) 1436 1437 static void 1438 free_session_slots(struct nfsd4_session *ses) 1439 { 1440 int i; 1441 1442 for (i = 0; i < ses->se_fchannel.maxreqs; i++) 1443 kfree(ses->se_slots[i]); 1444 } 1445 1446 /* 1447 * We don't actually need to cache the rpc and session headers, so we 1448 * can allocate a little less for each slot: 1449 */ 1450 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) 1451 { 1452 u32 size; 1453 1454 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) 1455 size = 0; 1456 else 1457 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 1458 return size + sizeof(struct nfsd4_slot); 1459 } 1460 1461 /* 1462 * XXX: If we run out of reserved DRC memory we could (up to a point) 1463 * re-negotiate active sessions and reduce their slot usage to make 1464 * room for new connections. For now we just fail the create session. 1465 */ 1466 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) 1467 { 1468 u32 slotsize = slot_bytes(ca); 1469 u32 num = ca->maxreqs; 1470 int avail; 1471 1472 spin_lock(&nfsd_drc_lock); 1473 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, 1474 nfsd_drc_max_mem - nfsd_drc_mem_used); 1475 num = min_t(int, num, avail / slotsize); 1476 nfsd_drc_mem_used += num * slotsize; 1477 spin_unlock(&nfsd_drc_lock); 1478 1479 return num; 1480 } 1481 1482 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) 1483 { 1484 int slotsize = slot_bytes(ca); 1485 1486 spin_lock(&nfsd_drc_lock); 1487 nfsd_drc_mem_used -= slotsize * ca->maxreqs; 1488 spin_unlock(&nfsd_drc_lock); 1489 } 1490 1491 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, 1492 struct nfsd4_channel_attrs *battrs) 1493 { 1494 int numslots = fattrs->maxreqs; 1495 int slotsize = slot_bytes(fattrs); 1496 struct nfsd4_session *new; 1497 int mem, i; 1498 1499 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) 1500 + sizeof(struct nfsd4_session) > PAGE_SIZE); 1501 mem = numslots * sizeof(struct nfsd4_slot *); 1502 1503 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); 1504 if (!new) 1505 return NULL; 1506 /* allocate each struct nfsd4_slot and data cache in one piece */ 1507 for (i = 0; i < numslots; i++) { 1508 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); 1509 if (!new->se_slots[i]) 1510 goto out_free; 1511 } 1512 1513 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); 1514 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); 1515 1516 return new; 1517 out_free: 1518 while (i--) 1519 kfree(new->se_slots[i]); 1520 kfree(new); 1521 return NULL; 1522 } 1523 1524 static void free_conn(struct nfsd4_conn *c) 1525 { 1526 svc_xprt_put(c->cn_xprt); 1527 kfree(c); 1528 } 1529 1530 static void nfsd4_conn_lost(struct svc_xpt_user *u) 1531 { 1532 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); 1533 struct nfs4_client *clp = c->cn_session->se_client; 1534 1535 spin_lock(&clp->cl_lock); 1536 if (!list_empty(&c->cn_persession)) { 1537 list_del(&c->cn_persession); 1538 free_conn(c); 1539 } 1540 nfsd4_probe_callback(clp); 1541 spin_unlock(&clp->cl_lock); 1542 } 1543 1544 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 
1545 { 1546 struct nfsd4_conn *conn; 1547 1548 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 1549 if (!conn) 1550 return NULL; 1551 svc_xprt_get(rqstp->rq_xprt); 1552 conn->cn_xprt = rqstp->rq_xprt; 1553 conn->cn_flags = flags; 1554 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 1555 return conn; 1556 } 1557 1558 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 1559 { 1560 conn->cn_session = ses; 1561 list_add(&conn->cn_persession, &ses->se_conns); 1562 } 1563 1564 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 1565 { 1566 struct nfs4_client *clp = ses->se_client; 1567 1568 spin_lock(&clp->cl_lock); 1569 __nfsd4_hash_conn(conn, ses); 1570 spin_unlock(&clp->cl_lock); 1571 } 1572 1573 static int nfsd4_register_conn(struct nfsd4_conn *conn) 1574 { 1575 conn->cn_xpt_user.callback = nfsd4_conn_lost; 1576 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 1577 } 1578 1579 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) 1580 { 1581 int ret; 1582 1583 nfsd4_hash_conn(conn, ses); 1584 ret = nfsd4_register_conn(conn); 1585 if (ret) 1586 /* oops; xprt is already down: */ 1587 nfsd4_conn_lost(&conn->cn_xpt_user); 1588 /* We may have gained or lost a callback channel: */ 1589 nfsd4_probe_callback_sync(ses->se_client); 1590 } 1591 1592 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) 1593 { 1594 u32 dir = NFS4_CDFC4_FORE; 1595 1596 if (cses->flags & SESSION4_BACK_CHAN) 1597 dir |= NFS4_CDFC4_BACK; 1598 return alloc_conn(rqstp, dir); 1599 } 1600 1601 /* must be called under client_lock */ 1602 static void nfsd4_del_conns(struct nfsd4_session *s) 1603 { 1604 struct nfs4_client *clp = s->se_client; 1605 struct nfsd4_conn *c; 1606 1607 spin_lock(&clp->cl_lock); 1608 while (!list_empty(&s->se_conns)) { 1609 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); 1610 list_del_init(&c->cn_persession); 1611 spin_unlock(&clp->cl_lock); 1612 1613 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); 1614 free_conn(c); 1615 1616 spin_lock(&clp->cl_lock); 1617 } 1618 spin_unlock(&clp->cl_lock); 1619 } 1620 1621 static void __free_session(struct nfsd4_session *ses) 1622 { 1623 free_session_slots(ses); 1624 kfree(ses); 1625 } 1626 1627 static void free_session(struct nfsd4_session *ses) 1628 { 1629 nfsd4_del_conns(ses); 1630 nfsd4_put_drc_mem(&ses->se_fchannel); 1631 __free_session(ses); 1632 } 1633 1634 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) 1635 { 1636 int idx; 1637 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1638 1639 new->se_client = clp; 1640 gen_sessionid(new); 1641 1642 INIT_LIST_HEAD(&new->se_conns); 1643 1644 new->se_cb_seq_nr = 1; 1645 new->se_flags = cses->flags; 1646 new->se_cb_prog = cses->callback_prog; 1647 new->se_cb_sec = cses->cb_sec; 1648 atomic_set(&new->se_ref, 0); 1649 idx = hash_sessionid(&new->se_sessionid); 1650 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); 1651 spin_lock(&clp->cl_lock); 1652 list_add(&new->se_perclnt, &clp->cl_sessions); 1653 spin_unlock(&clp->cl_lock); 1654 1655 { 1656 struct sockaddr *sa = svc_addr(rqstp); 1657 /* 1658 * This is a little silly; with sessions there's no real 1659 * use for the callback address. 
Use the peer address 1660 * as a reasonable default for now, but consider fixing 1661 * the rpc client not to require an address in the 1662 * future: 1663 */ 1664 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 1665 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 1666 } 1667 } 1668 1669 /* caller must hold client_lock */ 1670 static struct nfsd4_session * 1671 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 1672 { 1673 struct nfsd4_session *elem; 1674 int idx; 1675 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1676 1677 lockdep_assert_held(&nn->client_lock); 1678 1679 dump_sessionid(__func__, sessionid); 1680 idx = hash_sessionid(sessionid); 1681 /* Search in the appropriate list */ 1682 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 1683 if (!memcmp(elem->se_sessionid.data, sessionid->data, 1684 NFS4_MAX_SESSIONID_LEN)) { 1685 return elem; 1686 } 1687 } 1688 1689 dprintk("%s: session not found\n", __func__); 1690 return NULL; 1691 } 1692 1693 static struct nfsd4_session * 1694 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 1695 __be32 *ret) 1696 { 1697 struct nfsd4_session *session; 1698 __be32 status = nfserr_badsession; 1699 1700 session = __find_in_sessionid_hashtbl(sessionid, net); 1701 if (!session) 1702 goto out; 1703 status = nfsd4_get_session_locked(session); 1704 if (status) 1705 session = NULL; 1706 out: 1707 *ret = status; 1708 return session; 1709 } 1710 1711 /* caller must hold client_lock */ 1712 static void 1713 unhash_session(struct nfsd4_session *ses) 1714 { 1715 struct nfs4_client *clp = ses->se_client; 1716 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1717 1718 lockdep_assert_held(&nn->client_lock); 1719 1720 list_del(&ses->se_hash); 1721 spin_lock(&ses->se_client->cl_lock); 1722 list_del(&ses->se_perclnt); 1723 spin_unlock(&ses->se_client->cl_lock); 1724 } 1725 1726 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1727 static int 1728 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 1729 { 1730 /* 1731 * We're assuming the clid was not given out from a boot 1732 * precisely 2^32 (about 136 years) before this one. That seems 1733 * a safe assumption: 1734 */ 1735 if (clid->cl_boot == (u32)nn->boot_time) 1736 return 0; 1737 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", 1738 clid->cl_boot, clid->cl_id, nn->boot_time); 1739 return 1; 1740 } 1741 1742 /* 1743 * XXX Should we use a slab cache ? 1744 * This type of memory management is somewhat inefficient, but we use it 1745 * anyway since SETCLIENTID is not a common operation. 
1746 */ 1747 static struct nfs4_client *alloc_client(struct xdr_netobj name) 1748 { 1749 struct nfs4_client *clp; 1750 int i; 1751 1752 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); 1753 if (clp == NULL) 1754 return NULL; 1755 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); 1756 if (clp->cl_name.data == NULL) 1757 goto err_no_name; 1758 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) * 1759 OWNER_HASH_SIZE, GFP_KERNEL); 1760 if (!clp->cl_ownerstr_hashtbl) 1761 goto err_no_hashtbl; 1762 for (i = 0; i < OWNER_HASH_SIZE; i++) 1763 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 1764 clp->cl_name.len = name.len; 1765 INIT_LIST_HEAD(&clp->cl_sessions); 1766 idr_init(&clp->cl_stateids); 1767 atomic_set(&clp->cl_refcount, 0); 1768 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1769 INIT_LIST_HEAD(&clp->cl_idhash); 1770 INIT_LIST_HEAD(&clp->cl_openowners); 1771 INIT_LIST_HEAD(&clp->cl_delegations); 1772 INIT_LIST_HEAD(&clp->cl_lru); 1773 INIT_LIST_HEAD(&clp->cl_revoked); 1774 #ifdef CONFIG_NFSD_PNFS 1775 INIT_LIST_HEAD(&clp->cl_lo_states); 1776 #endif 1777 spin_lock_init(&clp->cl_lock); 1778 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1779 return clp; 1780 err_no_hashtbl: 1781 kfree(clp->cl_name.data); 1782 err_no_name: 1783 kfree(clp); 1784 return NULL; 1785 } 1786 1787 static void 1788 free_client(struct nfs4_client *clp) 1789 { 1790 while (!list_empty(&clp->cl_sessions)) { 1791 struct nfsd4_session *ses; 1792 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1793 se_perclnt); 1794 list_del(&ses->se_perclnt); 1795 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 1796 free_session(ses); 1797 } 1798 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 1799 free_svc_cred(&clp->cl_cred); 1800 kfree(clp->cl_ownerstr_hashtbl); 1801 kfree(clp->cl_name.data); 1802 idr_destroy(&clp->cl_stateids); 1803 kfree(clp); 1804 } 1805 1806 /* must be called under the client_lock */ 1807 static void 1808 unhash_client_locked(struct nfs4_client *clp) 1809 { 1810 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1811 struct nfsd4_session *ses; 1812 1813 lockdep_assert_held(&nn->client_lock); 1814 1815 /* Mark the client as expired! 
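         * (cl_time == 0 is the condition is_client_expired() checks.)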
*/ 1816 clp->cl_time = 0; 1817 /* Make it invisible */ 1818 if (!list_empty(&clp->cl_idhash)) { 1819 list_del_init(&clp->cl_idhash); 1820 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 1821 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 1822 else 1823 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 1824 } 1825 list_del_init(&clp->cl_lru); 1826 spin_lock(&clp->cl_lock); 1827 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 1828 list_del_init(&ses->se_hash); 1829 spin_unlock(&clp->cl_lock); 1830 } 1831 1832 static void 1833 unhash_client(struct nfs4_client *clp) 1834 { 1835 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1836 1837 spin_lock(&nn->client_lock); 1838 unhash_client_locked(clp); 1839 spin_unlock(&nn->client_lock); 1840 } 1841 1842 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 1843 { 1844 if (atomic_read(&clp->cl_refcount)) 1845 return nfserr_jukebox; 1846 unhash_client_locked(clp); 1847 return nfs_ok; 1848 } 1849 1850 static void 1851 __destroy_client(struct nfs4_client *clp) 1852 { 1853 struct nfs4_openowner *oo; 1854 struct nfs4_delegation *dp; 1855 struct list_head reaplist; 1856 1857 INIT_LIST_HEAD(&reaplist); 1858 spin_lock(&state_lock); 1859 while (!list_empty(&clp->cl_delegations)) { 1860 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 1861 WARN_ON(!unhash_delegation_locked(dp)); 1862 list_add(&dp->dl_recall_lru, &reaplist); 1863 } 1864 spin_unlock(&state_lock); 1865 while (!list_empty(&reaplist)) { 1866 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1867 list_del_init(&dp->dl_recall_lru); 1868 put_clnt_odstate(dp->dl_clnt_odstate); 1869 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 1870 nfs4_put_stid(&dp->dl_stid); 1871 } 1872 while (!list_empty(&clp->cl_revoked)) { 1873 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 1874 list_del_init(&dp->dl_recall_lru); 1875 nfs4_put_stid(&dp->dl_stid); 1876 } 1877 while (!list_empty(&clp->cl_openowners)) { 1878 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 1879 nfs4_get_stateowner(&oo->oo_owner); 1880 release_openowner(oo); 1881 } 1882 nfsd4_return_all_client_layouts(clp); 1883 nfsd4_shutdown_callback(clp); 1884 if (clp->cl_cb_conn.cb_xprt) 1885 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 1886 free_client(clp); 1887 } 1888 1889 static void 1890 destroy_client(struct nfs4_client *clp) 1891 { 1892 unhash_client(clp); 1893 __destroy_client(clp); 1894 } 1895 1896 static void expire_client(struct nfs4_client *clp) 1897 { 1898 unhash_client(clp); 1899 nfsd4_client_record_remove(clp); 1900 __destroy_client(clp); 1901 } 1902 1903 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 1904 { 1905 memcpy(target->cl_verifier.data, source->data, 1906 sizeof(target->cl_verifier.data)); 1907 } 1908 1909 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 1910 { 1911 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 1912 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 1913 } 1914 1915 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 1916 { 1917 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); 1918 target->cr_raw_principal = kstrdup(source->cr_raw_principal, 1919 GFP_KERNEL); 1920 if ((source->cr_principal && ! target->cr_principal) || 1921 (source->cr_raw_principal && ! 
target->cr_raw_principal)) 1922 return -ENOMEM; 1923 1924 target->cr_flavor = source->cr_flavor; 1925 target->cr_uid = source->cr_uid; 1926 target->cr_gid = source->cr_gid; 1927 target->cr_group_info = source->cr_group_info; 1928 get_group_info(target->cr_group_info); 1929 target->cr_gss_mech = source->cr_gss_mech; 1930 if (source->cr_gss_mech) 1931 gss_mech_get(source->cr_gss_mech); 1932 return 0; 1933 } 1934 1935 static int 1936 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 1937 { 1938 if (o1->len < o2->len) 1939 return -1; 1940 if (o1->len > o2->len) 1941 return 1; 1942 return memcmp(o1->data, o2->data, o1->len); 1943 } 1944 1945 static int same_name(const char *n1, const char *n2) 1946 { 1947 return 0 == memcmp(n1, n2, HEXDIR_LEN); 1948 } 1949 1950 static int 1951 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 1952 { 1953 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 1954 } 1955 1956 static int 1957 same_clid(clientid_t *cl1, clientid_t *cl2) 1958 { 1959 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 1960 } 1961 1962 static bool groups_equal(struct group_info *g1, struct group_info *g2) 1963 { 1964 int i; 1965 1966 if (g1->ngroups != g2->ngroups) 1967 return false; 1968 for (i=0; i<g1->ngroups; i++) 1969 if (!gid_eq(g1->gid[i], g2->gid[i])) 1970 return false; 1971 return true; 1972 } 1973 1974 /* 1975 * RFC 3530 language requires clid_inuse be returned when the 1976 * "principal" associated with a requests differs from that previously 1977 * used. We use uid, gid's, and gss principal string as our best 1978 * approximation. We also don't want to allow non-gss use of a client 1979 * established using gss: in theory cr_principal should catch that 1980 * change, but in practice cr_principal can be null even in the gss case 1981 * since gssd doesn't always pass down a principal string. 
1982 */ 1983 static bool is_gss_cred(struct svc_cred *cr) 1984 { 1985 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 1986 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 1987 } 1988 1989 1990 static bool 1991 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 1992 { 1993 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 1994 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) 1995 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) 1996 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 1997 return false; 1998 if (cr1->cr_principal == cr2->cr_principal) 1999 return true; 2000 if (!cr1->cr_principal || !cr2->cr_principal) 2001 return false; 2002 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 2003 } 2004 2005 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 2006 { 2007 struct svc_cred *cr = &rqstp->rq_cred; 2008 u32 service; 2009 2010 if (!cr->cr_gss_mech) 2011 return false; 2012 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 2013 return service == RPC_GSS_SVC_INTEGRITY || 2014 service == RPC_GSS_SVC_PRIVACY; 2015 } 2016 2017 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 2018 { 2019 struct svc_cred *cr = &rqstp->rq_cred; 2020 2021 if (!cl->cl_mach_cred) 2022 return true; 2023 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 2024 return false; 2025 if (!svc_rqst_integrity_protected(rqstp)) 2026 return false; 2027 if (cl->cl_cred.cr_raw_principal) 2028 return 0 == strcmp(cl->cl_cred.cr_raw_principal, 2029 cr->cr_raw_principal); 2030 if (!cr->cr_principal) 2031 return false; 2032 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 2033 } 2034 2035 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 2036 { 2037 __be32 verf[2]; 2038 2039 /* 2040 * This is opaque to client, so no need to byte-swap. 
Use 2041 * __force to keep sparse happy 2042 */ 2043 verf[0] = (__force __be32)get_seconds(); 2044 verf[1] = (__force __be32)nn->clverifier_counter++; 2045 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2046 } 2047 2048 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2049 { 2050 clp->cl_clientid.cl_boot = nn->boot_time; 2051 clp->cl_clientid.cl_id = nn->clientid_counter++; 2052 gen_confirm(clp, nn); 2053 } 2054 2055 static struct nfs4_stid * 2056 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 2057 { 2058 struct nfs4_stid *ret; 2059 2060 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 2061 if (!ret || !ret->sc_type) 2062 return NULL; 2063 return ret; 2064 } 2065 2066 static struct nfs4_stid * 2067 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 2068 { 2069 struct nfs4_stid *s; 2070 2071 spin_lock(&cl->cl_lock); 2072 s = find_stateid_locked(cl, t); 2073 if (s != NULL) { 2074 if (typemask & s->sc_type) 2075 atomic_inc(&s->sc_count); 2076 else 2077 s = NULL; 2078 } 2079 spin_unlock(&cl->cl_lock); 2080 return s; 2081 } 2082 2083 static struct nfs4_client *create_client(struct xdr_netobj name, 2084 struct svc_rqst *rqstp, nfs4_verifier *verf) 2085 { 2086 struct nfs4_client *clp; 2087 struct sockaddr *sa = svc_addr(rqstp); 2088 int ret; 2089 struct net *net = SVC_NET(rqstp); 2090 2091 clp = alloc_client(name); 2092 if (clp == NULL) 2093 return NULL; 2094 2095 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 2096 if (ret) { 2097 free_client(clp); 2098 return NULL; 2099 } 2100 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 2101 clp->cl_time = get_seconds(); 2102 clear_bit(0, &clp->cl_cb_slot_busy); 2103 copy_verf(clp, verf); 2104 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 2105 clp->cl_cb_session = NULL; 2106 clp->net = net; 2107 return clp; 2108 } 2109 2110 static void 2111 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 2112 { 2113 struct rb_node **new = &(root->rb_node), *parent = NULL; 2114 struct nfs4_client *clp; 2115 2116 while (*new) { 2117 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 2118 parent = *new; 2119 2120 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 2121 new = &((*new)->rb_left); 2122 else 2123 new = &((*new)->rb_right); 2124 } 2125 2126 rb_link_node(&new_clp->cl_namenode, parent, new); 2127 rb_insert_color(&new_clp->cl_namenode, root); 2128 } 2129 2130 static struct nfs4_client * 2131 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 2132 { 2133 int cmp; 2134 struct rb_node *node = root->rb_node; 2135 struct nfs4_client *clp; 2136 2137 while (node) { 2138 clp = rb_entry(node, struct nfs4_client, cl_namenode); 2139 cmp = compare_blob(&clp->cl_name, name); 2140 if (cmp > 0) 2141 node = node->rb_left; 2142 else if (cmp < 0) 2143 node = node->rb_right; 2144 else 2145 return clp; 2146 } 2147 return NULL; 2148 } 2149 2150 static void 2151 add_to_unconfirmed(struct nfs4_client *clp) 2152 { 2153 unsigned int idhashval; 2154 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2155 2156 lockdep_assert_held(&nn->client_lock); 2157 2158 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2159 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 2160 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2161 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 2162 renew_client_locked(clp); 2163 } 2164 2165 static void 2166 move_to_confirmed(struct nfs4_client *clp) 2167 { 2168 unsigned int idhashval = 
clientid_hashval(clp->cl_clientid.cl_id); 2169 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2170 2171 lockdep_assert_held(&nn->client_lock); 2172 2173 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 2174 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 2175 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2176 add_clp_to_name_tree(clp, &nn->conf_name_tree); 2177 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2178 renew_client_locked(clp); 2179 } 2180 2181 static struct nfs4_client * 2182 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 2183 { 2184 struct nfs4_client *clp; 2185 unsigned int idhashval = clientid_hashval(clid->cl_id); 2186 2187 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 2188 if (same_clid(&clp->cl_clientid, clid)) { 2189 if ((bool)clp->cl_minorversion != sessions) 2190 return NULL; 2191 renew_client_locked(clp); 2192 return clp; 2193 } 2194 } 2195 return NULL; 2196 } 2197 2198 static struct nfs4_client * 2199 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2200 { 2201 struct list_head *tbl = nn->conf_id_hashtbl; 2202 2203 lockdep_assert_held(&nn->client_lock); 2204 return find_client_in_id_table(tbl, clid, sessions); 2205 } 2206 2207 static struct nfs4_client * 2208 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2209 { 2210 struct list_head *tbl = nn->unconf_id_hashtbl; 2211 2212 lockdep_assert_held(&nn->client_lock); 2213 return find_client_in_id_table(tbl, clid, sessions); 2214 } 2215 2216 static bool clp_used_exchangeid(struct nfs4_client *clp) 2217 { 2218 return clp->cl_exchange_flags != 0; 2219 } 2220 2221 static struct nfs4_client * 2222 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2223 { 2224 lockdep_assert_held(&nn->client_lock); 2225 return find_clp_in_name_tree(name, &nn->conf_name_tree); 2226 } 2227 2228 static struct nfs4_client * 2229 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2230 { 2231 lockdep_assert_held(&nn->client_lock); 2232 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 2233 } 2234 2235 static void 2236 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 2237 { 2238 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 2239 struct sockaddr *sa = svc_addr(rqstp); 2240 u32 scopeid = rpc_get_scope_id(sa); 2241 unsigned short expected_family; 2242 2243 /* Currently, we only support tcp and tcp6 for the callback channel */ 2244 if (se->se_callback_netid_len == 3 && 2245 !memcmp(se->se_callback_netid_val, "tcp", 3)) 2246 expected_family = AF_INET; 2247 else if (se->se_callback_netid_len == 4 && 2248 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 2249 expected_family = AF_INET6; 2250 else 2251 goto out_err; 2252 2253 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 2254 se->se_callback_addr_len, 2255 (struct sockaddr *)&conn->cb_addr, 2256 sizeof(conn->cb_addr)); 2257 2258 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 2259 goto out_err; 2260 2261 if (conn->cb_addr.ss_family == AF_INET6) 2262 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 2263 2264 conn->cb_prog = se->se_callback_prog; 2265 conn->cb_ident = se->se_callback_ident; 2266 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 2267 return; 2268 out_err: 2269 conn->cb_addr.ss_family = AF_UNSPEC; 2270 conn->cb_addrlen = 0; 2271 dprintk("NFSD: this client (clientid %08x/%08x) " 2272 
"will not receive delegations\n", 2273 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); 2274 2275 return; 2276 } 2277 2278 /* 2279 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 2280 */ 2281 static void 2282 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 2283 { 2284 struct xdr_buf *buf = resp->xdr.buf; 2285 struct nfsd4_slot *slot = resp->cstate.slot; 2286 unsigned int base; 2287 2288 dprintk("--> %s slot %p\n", __func__, slot); 2289 2290 slot->sl_opcnt = resp->opcnt; 2291 slot->sl_status = resp->cstate.status; 2292 2293 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 2294 if (nfsd4_not_cached(resp)) { 2295 slot->sl_datalen = 0; 2296 return; 2297 } 2298 base = resp->cstate.data_offset; 2299 slot->sl_datalen = buf->len - base; 2300 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 2301 WARN(1, "%s: sessions DRC could not cache compound\n", 2302 __func__); 2303 return; 2304 } 2305 2306 /* 2307 * Encode the replay sequence operation from the slot values. 2308 * If cachethis is FALSE encode the uncached rep error on the next 2309 * operation which sets resp->p and increments resp->opcnt for 2310 * nfs4svc_encode_compoundres. 2311 * 2312 */ 2313 static __be32 2314 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 2315 struct nfsd4_compoundres *resp) 2316 { 2317 struct nfsd4_op *op; 2318 struct nfsd4_slot *slot = resp->cstate.slot; 2319 2320 /* Encode the replayed sequence operation */ 2321 op = &args->ops[resp->opcnt - 1]; 2322 nfsd4_encode_operation(resp, op); 2323 2324 /* Return nfserr_retry_uncached_rep in next operation. */ 2325 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { 2326 op = &args->ops[resp->opcnt++]; 2327 op->status = nfserr_retry_uncached_rep; 2328 nfsd4_encode_operation(resp, op); 2329 } 2330 return op->status; 2331 } 2332 2333 /* 2334 * The sequence operation is not cached because we can use the slot and 2335 * session values. 2336 */ 2337 static __be32 2338 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 2339 struct nfsd4_sequence *seq) 2340 { 2341 struct nfsd4_slot *slot = resp->cstate.slot; 2342 struct xdr_stream *xdr = &resp->xdr; 2343 __be32 *p; 2344 __be32 status; 2345 2346 dprintk("--> %s slot %p\n", __func__, slot); 2347 2348 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 2349 if (status) 2350 return status; 2351 2352 p = xdr_reserve_space(xdr, slot->sl_datalen); 2353 if (!p) { 2354 WARN_ON_ONCE(1); 2355 return nfserr_serverfault; 2356 } 2357 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 2358 xdr_commit_encode(xdr); 2359 2360 resp->opcnt = slot->sl_opcnt; 2361 return slot->sl_status; 2362 } 2363 2364 /* 2365 * Set the exchange_id flags returned by the server. 2366 */ 2367 static void 2368 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 2369 { 2370 #ifdef CONFIG_NFSD_PNFS 2371 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 2372 #else 2373 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 2374 #endif 2375 2376 /* Referrals are supported, Migration is not. */ 2377 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 2378 2379 /* set the wire flags to return to client. 
*/ 2380 clid->flags = new->cl_exchange_flags; 2381 } 2382 2383 static bool client_has_openowners(struct nfs4_client *clp) 2384 { 2385 struct nfs4_openowner *oo; 2386 2387 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { 2388 if (!list_empty(&oo->oo_owner.so_stateids)) 2389 return true; 2390 } 2391 return false; 2392 } 2393 2394 static bool client_has_state(struct nfs4_client *clp) 2395 { 2396 return client_has_openowners(clp) 2397 #ifdef CONFIG_NFSD_PNFS 2398 || !list_empty(&clp->cl_lo_states) 2399 #endif 2400 || !list_empty(&clp->cl_delegations) 2401 || !list_empty(&clp->cl_sessions); 2402 } 2403 2404 __be32 2405 nfsd4_exchange_id(struct svc_rqst *rqstp, 2406 struct nfsd4_compound_state *cstate, 2407 struct nfsd4_exchange_id *exid) 2408 { 2409 struct nfs4_client *conf, *new; 2410 struct nfs4_client *unconf = NULL; 2411 __be32 status; 2412 char addr_str[INET6_ADDRSTRLEN]; 2413 nfs4_verifier verf = exid->verifier; 2414 struct sockaddr *sa = svc_addr(rqstp); 2415 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 2416 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2417 2418 rpc_ntop(sa, addr_str, sizeof(addr_str)); 2419 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 2420 "ip_addr=%s flags %x, spa_how %d\n", 2421 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 2422 addr_str, exid->flags, exid->spa_how); 2423 2424 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 2425 return nfserr_inval; 2426 2427 new = create_client(exid->clname, rqstp, &verf); 2428 if (new == NULL) 2429 return nfserr_jukebox; 2430 2431 switch (exid->spa_how) { 2432 case SP4_MACH_CRED: 2433 exid->spo_must_enforce[0] = 0; 2434 exid->spo_must_enforce[1] = ( 2435 1 << (OP_BIND_CONN_TO_SESSION - 32) | 2436 1 << (OP_EXCHANGE_ID - 32) | 2437 1 << (OP_CREATE_SESSION - 32) | 2438 1 << (OP_DESTROY_SESSION - 32) | 2439 1 << (OP_DESTROY_CLIENTID - 32)); 2440 2441 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 2442 1 << (OP_OPEN_DOWNGRADE) | 2443 1 << (OP_LOCKU) | 2444 1 << (OP_DELEGRETURN)); 2445 2446 exid->spo_must_allow[1] &= ( 2447 1 << (OP_TEST_STATEID - 32) | 2448 1 << (OP_FREE_STATEID - 32)); 2449 if (!svc_rqst_integrity_protected(rqstp)) { 2450 status = nfserr_inval; 2451 goto out_nolock; 2452 } 2453 /* 2454 * Sometimes userspace doesn't give us a principal. 2455 * Which is a bug, really. 
Anyway, we can't enforce 2456 * MACH_CRED in that case, better to give up now: 2457 */ 2458 if (!new->cl_cred.cr_principal && 2459 !new->cl_cred.cr_raw_principal) { 2460 status = nfserr_serverfault; 2461 goto out_nolock; 2462 } 2463 new->cl_mach_cred = true; 2464 case SP4_NONE: 2465 break; 2466 default: /* checked by xdr code */ 2467 WARN_ON_ONCE(1); 2468 case SP4_SSV: 2469 status = nfserr_encr_alg_unsupp; 2470 goto out_nolock; 2471 } 2472 2473 /* Cases below refer to rfc 5661 section 18.35.4: */ 2474 spin_lock(&nn->client_lock); 2475 conf = find_confirmed_client_by_name(&exid->clname, nn); 2476 if (conf) { 2477 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 2478 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 2479 2480 if (update) { 2481 if (!clp_used_exchangeid(conf)) { /* buggy client */ 2482 status = nfserr_inval; 2483 goto out; 2484 } 2485 if (!nfsd4_mach_creds_match(conf, rqstp)) { 2486 status = nfserr_wrong_cred; 2487 goto out; 2488 } 2489 if (!creds_match) { /* case 9 */ 2490 status = nfserr_perm; 2491 goto out; 2492 } 2493 if (!verfs_match) { /* case 8 */ 2494 status = nfserr_not_same; 2495 goto out; 2496 } 2497 /* case 6 */ 2498 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 2499 goto out_copy; 2500 } 2501 if (!creds_match) { /* case 3 */ 2502 if (client_has_state(conf)) { 2503 status = nfserr_clid_inuse; 2504 goto out; 2505 } 2506 goto out_new; 2507 } 2508 if (verfs_match) { /* case 2 */ 2509 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 2510 goto out_copy; 2511 } 2512 /* case 5, client reboot */ 2513 conf = NULL; 2514 goto out_new; 2515 } 2516 2517 if (update) { /* case 7 */ 2518 status = nfserr_noent; 2519 goto out; 2520 } 2521 2522 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 2523 if (unconf) /* case 4, possible retry or client restart */ 2524 unhash_client_locked(unconf); 2525 2526 /* case 1 (normal case) */ 2527 out_new: 2528 if (conf) { 2529 status = mark_client_expired_locked(conf); 2530 if (status) 2531 goto out; 2532 } 2533 new->cl_minorversion = cstate->minorversion; 2534 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; 2535 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; 2536 2537 gen_clid(new, nn); 2538 add_to_unconfirmed(new); 2539 swap(new, conf); 2540 out_copy: 2541 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 2542 exid->clientid.cl_id = conf->cl_clientid.cl_id; 2543 2544 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 2545 nfsd4_set_ex_flags(conf, exid); 2546 2547 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 2548 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 2549 status = nfs_ok; 2550 2551 out: 2552 spin_unlock(&nn->client_lock); 2553 out_nolock: 2554 if (new) 2555 expire_client(new); 2556 if (unconf) 2557 expire_client(unconf); 2558 return status; 2559 } 2560 2561 static __be32 2562 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 2563 { 2564 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 2565 slot_seqid); 2566 2567 /* The slot is in use, and no response has been sent. 
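 * A retransmission of that in-progress request (seqid == slot_seqid)
 * gets nfserr_jukebox so the client backs off and retries; any other
 * seqid on a busy slot is misordered. On an idle slot, slot_seqid + 1
 * is the expected next request, an equal seqid is a replay served from
 * the cache, and anything else is misordered.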
*/ 2568 if (slot_inuse) { 2569 if (seqid == slot_seqid) 2570 return nfserr_jukebox; 2571 else 2572 return nfserr_seq_misordered; 2573 } 2574 /* Note unsigned 32-bit arithmetic handles wraparound: */ 2575 if (likely(seqid == slot_seqid + 1)) 2576 return nfs_ok; 2577 if (seqid == slot_seqid) 2578 return nfserr_replay_cache; 2579 return nfserr_seq_misordered; 2580 } 2581 2582 /* 2583 * Cache the create session result into the create session single DRC 2584 * slot cache by saving the xdr structure. sl_seqid has been set. 2585 * Do this for solo or embedded create session operations. 2586 */ 2587 static void 2588 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 2589 struct nfsd4_clid_slot *slot, __be32 nfserr) 2590 { 2591 slot->sl_status = nfserr; 2592 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 2593 } 2594 2595 static __be32 2596 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 2597 struct nfsd4_clid_slot *slot) 2598 { 2599 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 2600 return slot->sl_status; 2601 } 2602 2603 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 2604 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 2605 1 + /* MIN tag is length with zero, only length */ \ 2606 3 + /* version, opcount, opcode */ \ 2607 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 2608 /* seqid, slotID, slotID, cache */ \ 2609 4 ) * sizeof(__be32)) 2610 2611 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 2612 2 + /* verifier: AUTH_NULL, length 0 */\ 2613 1 + /* status */ \ 2614 1 + /* MIN tag is length with zero, only length */ \ 2615 3 + /* opcount, opcode, opstatus*/ \ 2616 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 2617 /* seqid, slotID, slotID, slotID, status */ \ 2618 5 ) * sizeof(__be32)) 2619 2620 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 2621 { 2622 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 2623 2624 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 2625 return nfserr_toosmall; 2626 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 2627 return nfserr_toosmall; 2628 ca->headerpadsz = 0; 2629 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 2630 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 2631 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 2632 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 2633 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 2634 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 2635 /* 2636 * Note decreasing slot size below client's request may make it 2637 * difficult for client to function correctly, whereas 2638 * decreasing the number of slots will (just?) affect 2639 * performance. When short on memory we therefore prefer to 2640 * decrease number of slots instead of their size. Clients that 2641 * request larger slots than they need will get poor results: 2642 */ 2643 ca->maxreqs = nfsd4_get_drc_mem(ca); 2644 if (!ca->maxreqs) 2645 return nfserr_jukebox; 2646 2647 return nfs_ok; 2648 } 2649 2650 /* 2651 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. 2652 * These are based on similar macros in linux/sunrpc/msg_prot.h . 
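 *
 * Roughly, the request bound below is the encoded size of a CB_RECALL
 * plus an RPC call header with room for an AUTH_SYS credential and
 * verifier; the reply bound is the decoded CB_RECALL size plus a reply
 * header with a NULL-auth verifier. Both are counted in 32-bit XDR
 * words before the final multiply by sizeof(__be32).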
2653 */ 2654 #define RPC_MAX_HEADER_WITH_AUTH_SYS \ 2655 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) 2656 2657 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ 2658 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) 2659 2660 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 2661 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) 2662 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 2663 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ 2664 sizeof(__be32)) 2665 2666 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 2667 { 2668 ca->headerpadsz = 0; 2669 2670 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 2671 return nfserr_toosmall; 2672 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 2673 return nfserr_toosmall; 2674 ca->maxresp_cached = 0; 2675 if (ca->maxops < 2) 2676 return nfserr_toosmall; 2677 2678 return nfs_ok; 2679 } 2680 2681 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 2682 { 2683 switch (cbs->flavor) { 2684 case RPC_AUTH_NULL: 2685 case RPC_AUTH_UNIX: 2686 return nfs_ok; 2687 default: 2688 /* 2689 * GSS case: the spec doesn't allow us to return this 2690 * error. But it also doesn't allow us not to support 2691 * GSS. 2692 * I'd rather this fail hard than return some error the 2693 * client might think it can already handle: 2694 */ 2695 return nfserr_encr_alg_unsupp; 2696 } 2697 } 2698 2699 __be32 2700 nfsd4_create_session(struct svc_rqst *rqstp, 2701 struct nfsd4_compound_state *cstate, 2702 struct nfsd4_create_session *cr_ses) 2703 { 2704 struct sockaddr *sa = svc_addr(rqstp); 2705 struct nfs4_client *conf, *unconf; 2706 struct nfs4_client *old = NULL; 2707 struct nfsd4_session *new; 2708 struct nfsd4_conn *conn; 2709 struct nfsd4_clid_slot *cs_slot = NULL; 2710 __be32 status = 0; 2711 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2712 2713 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 2714 return nfserr_inval; 2715 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 2716 if (status) 2717 return status; 2718 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 2719 if (status) 2720 return status; 2721 status = check_backchannel_attrs(&cr_ses->back_channel); 2722 if (status) 2723 goto out_release_drc_mem; 2724 status = nfserr_jukebox; 2725 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 2726 if (!new) 2727 goto out_release_drc_mem; 2728 conn = alloc_conn_from_crses(rqstp, cr_ses); 2729 if (!conn) 2730 goto out_free_session; 2731 2732 spin_lock(&nn->client_lock); 2733 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 2734 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 2735 WARN_ON_ONCE(conf && unconf); 2736 2737 if (conf) { 2738 status = nfserr_wrong_cred; 2739 if (!nfsd4_mach_creds_match(conf, rqstp)) 2740 goto out_free_conn; 2741 cs_slot = &conf->cl_cs_slot; 2742 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 2743 if (status) { 2744 if (status == nfserr_replay_cache) 2745 status = nfsd4_replay_create_session(cr_ses, cs_slot); 2746 goto out_free_conn; 2747 } 2748 } else if (unconf) { 2749 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 2750 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 2751 status = nfserr_clid_inuse; 2752 goto out_free_conn; 2753 } 2754 status = nfserr_wrong_cred; 2755 if (!nfsd4_mach_creds_match(unconf, rqstp)) 2756 goto out_free_conn; 2757 cs_slot = &unconf->cl_cs_slot; 2758 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 2759 if (status) { 2760 /* an unconfirmed replay returns misordered */ 2761 status = nfserr_seq_misordered; 2762 goto 
out_free_conn; 2763 } 2764 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 2765 if (old) { 2766 status = mark_client_expired_locked(old); 2767 if (status) { 2768 old = NULL; 2769 goto out_free_conn; 2770 } 2771 } 2772 move_to_confirmed(unconf); 2773 conf = unconf; 2774 } else { 2775 status = nfserr_stale_clientid; 2776 goto out_free_conn; 2777 } 2778 status = nfs_ok; 2779 /* Persistent sessions are not supported */ 2780 cr_ses->flags &= ~SESSION4_PERSIST; 2781 /* Upshifting from TCP to RDMA is not supported */ 2782 cr_ses->flags &= ~SESSION4_RDMA; 2783 2784 init_session(rqstp, new, conf, cr_ses); 2785 nfsd4_get_session_locked(new); 2786 2787 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 2788 NFS4_MAX_SESSIONID_LEN); 2789 cs_slot->sl_seqid++; 2790 cr_ses->seqid = cs_slot->sl_seqid; 2791 2792 /* cache solo and embedded create sessions under the client_lock */ 2793 nfsd4_cache_create_session(cr_ses, cs_slot, status); 2794 spin_unlock(&nn->client_lock); 2795 /* init connection and backchannel */ 2796 nfsd4_init_conn(rqstp, conn, new); 2797 nfsd4_put_session(new); 2798 if (old) 2799 expire_client(old); 2800 return status; 2801 out_free_conn: 2802 spin_unlock(&nn->client_lock); 2803 free_conn(conn); 2804 if (old) 2805 expire_client(old); 2806 out_free_session: 2807 __free_session(new); 2808 out_release_drc_mem: 2809 nfsd4_put_drc_mem(&cr_ses->fore_channel); 2810 return status; 2811 } 2812 2813 static __be32 nfsd4_map_bcts_dir(u32 *dir) 2814 { 2815 switch (*dir) { 2816 case NFS4_CDFC4_FORE: 2817 case NFS4_CDFC4_BACK: 2818 return nfs_ok; 2819 case NFS4_CDFC4_FORE_OR_BOTH: 2820 case NFS4_CDFC4_BACK_OR_BOTH: 2821 *dir = NFS4_CDFC4_BOTH; 2822 return nfs_ok; 2823 }; 2824 return nfserr_inval; 2825 } 2826 2827 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc) 2828 { 2829 struct nfsd4_session *session = cstate->session; 2830 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2831 __be32 status; 2832 2833 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 2834 if (status) 2835 return status; 2836 spin_lock(&nn->client_lock); 2837 session->se_cb_prog = bc->bc_cb_program; 2838 session->se_cb_sec = bc->bc_cb_sec; 2839 spin_unlock(&nn->client_lock); 2840 2841 nfsd4_probe_callback(session->se_client); 2842 2843 return nfs_ok; 2844 } 2845 2846 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 2847 struct nfsd4_compound_state *cstate, 2848 struct nfsd4_bind_conn_to_session *bcts) 2849 { 2850 __be32 status; 2851 struct nfsd4_conn *conn; 2852 struct nfsd4_session *session; 2853 struct net *net = SVC_NET(rqstp); 2854 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2855 2856 if (!nfsd4_last_compound_op(rqstp)) 2857 return nfserr_not_only_op; 2858 spin_lock(&nn->client_lock); 2859 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 2860 spin_unlock(&nn->client_lock); 2861 if (!session) 2862 goto out_no_session; 2863 status = nfserr_wrong_cred; 2864 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 2865 goto out; 2866 status = nfsd4_map_bcts_dir(&bcts->dir); 2867 if (status) 2868 goto out; 2869 conn = alloc_conn(rqstp, bcts->dir); 2870 status = nfserr_jukebox; 2871 if (!conn) 2872 goto out; 2873 nfsd4_init_conn(rqstp, conn, session); 2874 status = nfs_ok; 2875 out: 2876 nfsd4_put_session(session); 2877 out_no_session: 2878 return status; 2879 } 2880 2881 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) 2882 { 2883 if (!session) 2884 
return 0; 2885 return !memcmp(sid, &session->se_sessionid, sizeof(*sid)); 2886 } 2887 2888 __be32 2889 nfsd4_destroy_session(struct svc_rqst *r, 2890 struct nfsd4_compound_state *cstate, 2891 struct nfsd4_destroy_session *sessionid) 2892 { 2893 struct nfsd4_session *ses; 2894 __be32 status; 2895 int ref_held_by_me = 0; 2896 struct net *net = SVC_NET(r); 2897 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2898 2899 status = nfserr_not_only_op; 2900 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { 2901 if (!nfsd4_last_compound_op(r)) 2902 goto out; 2903 ref_held_by_me++; 2904 } 2905 dump_sessionid(__func__, &sessionid->sessionid); 2906 spin_lock(&nn->client_lock); 2907 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status); 2908 if (!ses) 2909 goto out_client_lock; 2910 status = nfserr_wrong_cred; 2911 if (!nfsd4_mach_creds_match(ses->se_client, r)) 2912 goto out_put_session; 2913 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 2914 if (status) 2915 goto out_put_session; 2916 unhash_session(ses); 2917 spin_unlock(&nn->client_lock); 2918 2919 nfsd4_probe_callback_sync(ses->se_client); 2920 2921 spin_lock(&nn->client_lock); 2922 status = nfs_ok; 2923 out_put_session: 2924 nfsd4_put_session_locked(ses); 2925 out_client_lock: 2926 spin_unlock(&nn->client_lock); 2927 out: 2928 return status; 2929 } 2930 2931 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 2932 { 2933 struct nfsd4_conn *c; 2934 2935 list_for_each_entry(c, &s->se_conns, cn_persession) { 2936 if (c->cn_xprt == xpt) { 2937 return c; 2938 } 2939 } 2940 return NULL; 2941 } 2942 2943 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 2944 { 2945 struct nfs4_client *clp = ses->se_client; 2946 struct nfsd4_conn *c; 2947 __be32 status = nfs_ok; 2948 int ret; 2949 2950 spin_lock(&clp->cl_lock); 2951 c = __nfsd4_find_conn(new->cn_xprt, ses); 2952 if (c) 2953 goto out_free; 2954 status = nfserr_conn_not_bound_to_session; 2955 if (clp->cl_mach_cred) 2956 goto out_free; 2957 __nfsd4_hash_conn(new, ses); 2958 spin_unlock(&clp->cl_lock); 2959 ret = nfsd4_register_conn(new); 2960 if (ret) 2961 /* oops; xprt is already down: */ 2962 nfsd4_conn_lost(&new->cn_xpt_user); 2963 return nfs_ok; 2964 out_free: 2965 spin_unlock(&clp->cl_lock); 2966 free_conn(new); 2967 return status; 2968 } 2969 2970 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 2971 { 2972 struct nfsd4_compoundargs *args = rqstp->rq_argp; 2973 2974 return args->opcnt > session->se_fchannel.maxops; 2975 } 2976 2977 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 2978 struct nfsd4_session *session) 2979 { 2980 struct xdr_buf *xb = &rqstp->rq_arg; 2981 2982 return xb->len > session->se_fchannel.maxreq_sz; 2983 } 2984 2985 __be32 2986 nfsd4_sequence(struct svc_rqst *rqstp, 2987 struct nfsd4_compound_state *cstate, 2988 struct nfsd4_sequence *seq) 2989 { 2990 struct nfsd4_compoundres *resp = rqstp->rq_resp; 2991 struct xdr_stream *xdr = &resp->xdr; 2992 struct nfsd4_session *session; 2993 struct nfs4_client *clp; 2994 struct nfsd4_slot *slot; 2995 struct nfsd4_conn *conn; 2996 __be32 status; 2997 int buflen; 2998 struct net *net = SVC_NET(rqstp); 2999 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3000 3001 if (resp->opcnt != 1) 3002 return nfserr_sequence_pos; 3003 3004 /* 3005 * Will be either used or freed by nfsd4_sequence_check_conn 3006 * below. 
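 * Allocating the connection before taking the client_lock keeps the
 * allocation out of the spinlock; if the transport is already bound to
 * the session, nfsd4_sequence_check_conn() simply frees this new one.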
3007 */ 3008 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 3009 if (!conn) 3010 return nfserr_jukebox; 3011 3012 spin_lock(&nn->client_lock); 3013 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 3014 if (!session) 3015 goto out_no_session; 3016 clp = session->se_client; 3017 3018 status = nfserr_too_many_ops; 3019 if (nfsd4_session_too_many_ops(rqstp, session)) 3020 goto out_put_session; 3021 3022 status = nfserr_req_too_big; 3023 if (nfsd4_request_too_big(rqstp, session)) 3024 goto out_put_session; 3025 3026 status = nfserr_badslot; 3027 if (seq->slotid >= session->se_fchannel.maxreqs) 3028 goto out_put_session; 3029 3030 slot = session->se_slots[seq->slotid]; 3031 dprintk("%s: slotid %d\n", __func__, seq->slotid); 3032 3033 /* We do not negotiate the number of slots yet, so set the 3034 * maxslots to the session maxreqs which is used to encode 3035 * sr_highest_slotid and the sr_target_slot id to maxslots */ 3036 seq->maxslots = session->se_fchannel.maxreqs; 3037 3038 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 3039 slot->sl_flags & NFSD4_SLOT_INUSE); 3040 if (status == nfserr_replay_cache) { 3041 status = nfserr_seq_misordered; 3042 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 3043 goto out_put_session; 3044 cstate->slot = slot; 3045 cstate->session = session; 3046 cstate->clp = clp; 3047 /* Return the cached reply status and set cstate->status 3048 * for nfsd4_proc_compound processing */ 3049 status = nfsd4_replay_cache_entry(resp, seq); 3050 cstate->status = nfserr_replay_cache; 3051 goto out; 3052 } 3053 if (status) 3054 goto out_put_session; 3055 3056 status = nfsd4_sequence_check_conn(conn, session); 3057 conn = NULL; 3058 if (status) 3059 goto out_put_session; 3060 3061 buflen = (seq->cachethis) ? 3062 session->se_fchannel.maxresp_cached : 3063 session->se_fchannel.maxresp_sz; 3064 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 3065 nfserr_rep_too_big; 3066 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 3067 goto out_put_session; 3068 svc_reserve(rqstp, buflen); 3069 3070 status = nfs_ok; 3071 /* Success! 
bump slot seqid */ 3072 slot->sl_seqid = seq->seqid; 3073 slot->sl_flags |= NFSD4_SLOT_INUSE; 3074 if (seq->cachethis) 3075 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 3076 else 3077 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 3078 3079 cstate->slot = slot; 3080 cstate->session = session; 3081 cstate->clp = clp; 3082 3083 out: 3084 switch (clp->cl_cb_state) { 3085 case NFSD4_CB_DOWN: 3086 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 3087 break; 3088 case NFSD4_CB_FAULT: 3089 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 3090 break; 3091 default: 3092 seq->status_flags = 0; 3093 } 3094 if (!list_empty(&clp->cl_revoked)) 3095 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 3096 out_no_session: 3097 if (conn) 3098 free_conn(conn); 3099 spin_unlock(&nn->client_lock); 3100 return status; 3101 out_put_session: 3102 nfsd4_put_session_locked(session); 3103 goto out_no_session; 3104 } 3105 3106 void 3107 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 3108 { 3109 struct nfsd4_compound_state *cs = &resp->cstate; 3110 3111 if (nfsd4_has_session(cs)) { 3112 if (cs->status != nfserr_replay_cache) { 3113 nfsd4_store_cache_entry(resp); 3114 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 3115 } 3116 /* Drop session reference that was taken in nfsd4_sequence() */ 3117 nfsd4_put_session(cs->session); 3118 } else if (cs->clp) 3119 put_client_renew(cs->clp); 3120 } 3121 3122 __be32 3123 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) 3124 { 3125 struct nfs4_client *conf, *unconf; 3126 struct nfs4_client *clp = NULL; 3127 __be32 status = 0; 3128 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3129 3130 spin_lock(&nn->client_lock); 3131 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 3132 conf = find_confirmed_client(&dc->clientid, true, nn); 3133 WARN_ON_ONCE(conf && unconf); 3134 3135 if (conf) { 3136 if (client_has_state(conf)) { 3137 status = nfserr_clientid_busy; 3138 goto out; 3139 } 3140 status = mark_client_expired_locked(conf); 3141 if (status) 3142 goto out; 3143 clp = conf; 3144 } else if (unconf) 3145 clp = unconf; 3146 else { 3147 status = nfserr_stale_clientid; 3148 goto out; 3149 } 3150 if (!nfsd4_mach_creds_match(clp, rqstp)) { 3151 clp = NULL; 3152 status = nfserr_wrong_cred; 3153 goto out; 3154 } 3155 unhash_client_locked(clp); 3156 out: 3157 spin_unlock(&nn->client_lock); 3158 if (clp) 3159 expire_client(clp); 3160 return status; 3161 } 3162 3163 __be32 3164 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) 3165 { 3166 __be32 status = 0; 3167 3168 if (rc->rca_one_fs) { 3169 if (!cstate->current_fh.fh_dentry) 3170 return nfserr_nofilehandle; 3171 /* 3172 * We don't take advantage of the rca_one_fs case. 3173 * That's OK, it's optional, we can safely ignore it. 3174 */ 3175 return nfs_ok; 3176 } 3177 3178 status = nfserr_complete_already; 3179 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, 3180 &cstate->session->se_client->cl_flags)) 3181 goto out; 3182 3183 status = nfserr_stale_clientid; 3184 if (is_client_expired(cstate->session->se_client)) 3185 /* 3186 * The following error isn't really legal. 3187 * But we only get here if the client just explicitly 3188 * destroyed the client. Surely it no longer cares what 3189 * error it gets back on an operation for the dead 3190 * client. 
3191 */ 3192 goto out; 3193 3194 status = nfs_ok; 3195 nfsd4_client_record_create(cstate->session->se_client); 3196 out: 3197 return status; 3198 } 3199 3200 __be32 3201 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3202 struct nfsd4_setclientid *setclid) 3203 { 3204 struct xdr_netobj clname = setclid->se_name; 3205 nfs4_verifier clverifier = setclid->se_verf; 3206 struct nfs4_client *conf, *new; 3207 struct nfs4_client *unconf = NULL; 3208 __be32 status; 3209 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3210 3211 new = create_client(clname, rqstp, &clverifier); 3212 if (new == NULL) 3213 return nfserr_jukebox; 3214 /* Cases below refer to rfc 3530 section 14.2.33: */ 3215 spin_lock(&nn->client_lock); 3216 conf = find_confirmed_client_by_name(&clname, nn); 3217 if (conf && client_has_state(conf)) { 3218 /* case 0: */ 3219 status = nfserr_clid_inuse; 3220 if (clp_used_exchangeid(conf)) 3221 goto out; 3222 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 3223 char addr_str[INET6_ADDRSTRLEN]; 3224 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, 3225 sizeof(addr_str)); 3226 dprintk("NFSD: setclientid: string in use by client " 3227 "at %s\n", addr_str); 3228 goto out; 3229 } 3230 } 3231 unconf = find_unconfirmed_client_by_name(&clname, nn); 3232 if (unconf) 3233 unhash_client_locked(unconf); 3234 if (conf && same_verf(&conf->cl_verifier, &clverifier)) { 3235 /* case 1: probable callback update */ 3236 copy_clid(new, conf); 3237 gen_confirm(new, nn); 3238 } else /* case 4 (new client) or cases 2, 3 (client reboot): */ 3239 gen_clid(new, nn); 3240 new->cl_minorversion = 0; 3241 gen_callback(new, setclid, rqstp); 3242 add_to_unconfirmed(new); 3243 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 3244 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 3245 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 3246 new = NULL; 3247 status = nfs_ok; 3248 out: 3249 spin_unlock(&nn->client_lock); 3250 if (new) 3251 free_client(new); 3252 if (unconf) 3253 expire_client(unconf); 3254 return status; 3255 } 3256 3257 3258 __be32 3259 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 3260 struct nfsd4_compound_state *cstate, 3261 struct nfsd4_setclientid_confirm *setclientid_confirm) 3262 { 3263 struct nfs4_client *conf, *unconf; 3264 struct nfs4_client *old = NULL; 3265 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 3266 clientid_t * clid = &setclientid_confirm->sc_clientid; 3267 __be32 status; 3268 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3269 3270 if (STALE_CLIENTID(clid, nn)) 3271 return nfserr_stale_clientid; 3272 3273 spin_lock(&nn->client_lock); 3274 conf = find_confirmed_client(clid, false, nn); 3275 unconf = find_unconfirmed_client(clid, false, nn); 3276 /* 3277 * We try hard to give out unique clientid's, so if we get an 3278 * attempt to confirm the same clientid with a different cred, 3279 * the client may be buggy; this should never happen. 
3280 * 3281 * Nevertheless, RFC 7530 recommends INUSE for this case: 3282 */ 3283 status = nfserr_clid_inuse; 3284 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) 3285 goto out; 3286 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) 3287 goto out; 3288 /* cases below refer to rfc 3530 section 14.2.34: */ 3289 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 3290 if (conf && same_verf(&confirm, &conf->cl_confirm)) { 3291 /* case 2: probable retransmit */ 3292 status = nfs_ok; 3293 } else /* case 4: client hasn't noticed we rebooted yet? */ 3294 status = nfserr_stale_clientid; 3295 goto out; 3296 } 3297 status = nfs_ok; 3298 if (conf) { /* case 1: callback update */ 3299 old = unconf; 3300 unhash_client_locked(old); 3301 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 3302 } else { /* case 3: normal case; new or rebooted client */ 3303 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 3304 if (old) { 3305 status = nfserr_clid_inuse; 3306 if (client_has_state(old) 3307 && !same_creds(&unconf->cl_cred, 3308 &old->cl_cred)) 3309 goto out; 3310 status = mark_client_expired_locked(old); 3311 if (status) { 3312 old = NULL; 3313 goto out; 3314 } 3315 } 3316 move_to_confirmed(unconf); 3317 conf = unconf; 3318 } 3319 get_client_locked(conf); 3320 spin_unlock(&nn->client_lock); 3321 nfsd4_probe_callback(conf); 3322 spin_lock(&nn->client_lock); 3323 put_client_renew_locked(conf); 3324 out: 3325 spin_unlock(&nn->client_lock); 3326 if (old) 3327 expire_client(old); 3328 return status; 3329 } 3330 3331 static struct nfs4_file *nfsd4_alloc_file(void) 3332 { 3333 return kmem_cache_alloc(file_slab, GFP_KERNEL); 3334 } 3335 3336 /* OPEN Share state helper functions */ 3337 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, 3338 struct nfs4_file *fp) 3339 { 3340 lockdep_assert_held(&state_lock); 3341 3342 atomic_set(&fp->fi_ref, 1); 3343 spin_lock_init(&fp->fi_lock); 3344 INIT_LIST_HEAD(&fp->fi_stateids); 3345 INIT_LIST_HEAD(&fp->fi_delegations); 3346 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 3347 fh_copy_shallow(&fp->fi_fhandle, fh); 3348 fp->fi_deleg_file = NULL; 3349 fp->fi_had_conflict = false; 3350 fp->fi_share_deny = 0; 3351 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 3352 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 3353 #ifdef CONFIG_NFSD_PNFS 3354 INIT_LIST_HEAD(&fp->fi_lo_states); 3355 atomic_set(&fp->fi_lo_recalls, 0); 3356 #endif 3357 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); 3358 } 3359 3360 void 3361 nfsd4_free_slabs(void) 3362 { 3363 kmem_cache_destroy(odstate_slab); 3364 kmem_cache_destroy(openowner_slab); 3365 kmem_cache_destroy(lockowner_slab); 3366 kmem_cache_destroy(file_slab); 3367 kmem_cache_destroy(stateid_slab); 3368 kmem_cache_destroy(deleg_slab); 3369 } 3370 3371 int 3372 nfsd4_init_slabs(void) 3373 { 3374 openowner_slab = kmem_cache_create("nfsd4_openowners", 3375 sizeof(struct nfs4_openowner), 0, 0, NULL); 3376 if (openowner_slab == NULL) 3377 goto out; 3378 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 3379 sizeof(struct nfs4_lockowner), 0, 0, NULL); 3380 if (lockowner_slab == NULL) 3381 goto out_free_openowner_slab; 3382 file_slab = kmem_cache_create("nfsd4_files", 3383 sizeof(struct nfs4_file), 0, 0, NULL); 3384 if (file_slab == NULL) 3385 goto out_free_lockowner_slab; 3386 stateid_slab = kmem_cache_create("nfsd4_stateids", 3387 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 3388 if (stateid_slab == NULL) 3389 goto out_free_file_slab; 3390 deleg_slab = kmem_cache_create("nfsd4_delegations", 3391 
sizeof(struct nfs4_delegation), 0, 0, NULL); 3392 if (deleg_slab == NULL) 3393 goto out_free_stateid_slab; 3394 odstate_slab = kmem_cache_create("nfsd4_odstate", 3395 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); 3396 if (odstate_slab == NULL) 3397 goto out_free_deleg_slab; 3398 return 0; 3399 3400 out_free_deleg_slab: 3401 kmem_cache_destroy(deleg_slab); 3402 out_free_stateid_slab: 3403 kmem_cache_destroy(stateid_slab); 3404 out_free_file_slab: 3405 kmem_cache_destroy(file_slab); 3406 out_free_lockowner_slab: 3407 kmem_cache_destroy(lockowner_slab); 3408 out_free_openowner_slab: 3409 kmem_cache_destroy(openowner_slab); 3410 out: 3411 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 3412 return -ENOMEM; 3413 } 3414 3415 static void init_nfs4_replay(struct nfs4_replay *rp) 3416 { 3417 rp->rp_status = nfserr_serverfault; 3418 rp->rp_buflen = 0; 3419 rp->rp_buf = rp->rp_ibuf; 3420 mutex_init(&rp->rp_mutex); 3421 } 3422 3423 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 3424 struct nfs4_stateowner *so) 3425 { 3426 if (!nfsd4_has_session(cstate)) { 3427 mutex_lock(&so->so_replay.rp_mutex); 3428 cstate->replay_owner = nfs4_get_stateowner(so); 3429 } 3430 } 3431 3432 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 3433 { 3434 struct nfs4_stateowner *so = cstate->replay_owner; 3435 3436 if (so != NULL) { 3437 cstate->replay_owner = NULL; 3438 mutex_unlock(&so->so_replay.rp_mutex); 3439 nfs4_put_stateowner(so); 3440 } 3441 } 3442 3443 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 3444 { 3445 struct nfs4_stateowner *sop; 3446 3447 sop = kmem_cache_alloc(slab, GFP_KERNEL); 3448 if (!sop) 3449 return NULL; 3450 3451 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); 3452 if (!sop->so_owner.data) { 3453 kmem_cache_free(slab, sop); 3454 return NULL; 3455 } 3456 sop->so_owner.len = owner->len; 3457 3458 INIT_LIST_HEAD(&sop->so_stateids); 3459 sop->so_client = clp; 3460 init_nfs4_replay(&sop->so_replay); 3461 atomic_set(&sop->so_count, 1); 3462 return sop; 3463 } 3464 3465 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 3466 { 3467 lockdep_assert_held(&clp->cl_lock); 3468 3469 list_add(&oo->oo_owner.so_strhash, 3470 &clp->cl_ownerstr_hashtbl[strhashval]); 3471 list_add(&oo->oo_perclient, &clp->cl_openowners); 3472 } 3473 3474 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 3475 { 3476 unhash_openowner_locked(openowner(so)); 3477 } 3478 3479 static void nfs4_free_openowner(struct nfs4_stateowner *so) 3480 { 3481 struct nfs4_openowner *oo = openowner(so); 3482 3483 kmem_cache_free(openowner_slab, oo); 3484 } 3485 3486 static const struct nfs4_stateowner_operations openowner_ops = { 3487 .so_unhash = nfs4_unhash_openowner, 3488 .so_free = nfs4_free_openowner, 3489 }; 3490 3491 static struct nfs4_ol_stateid * 3492 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 3493 { 3494 struct nfs4_ol_stateid *local, *ret = NULL; 3495 struct nfs4_openowner *oo = open->op_openowner; 3496 3497 lockdep_assert_held(&fp->fi_lock); 3498 3499 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 3500 /* ignore lock owners */ 3501 if (local->st_stateowner->so_is_open_owner == 0) 3502 continue; 3503 if (local->st_stateowner == &oo->oo_owner) { 3504 ret = local; 3505 atomic_inc(&ret->st_stid.sc_count); 3506 break; 3507 } 3508 } 3509 return ret; 3510 } 3511 3512 static struct nfs4_openowner * 
3513 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 3514 struct nfsd4_compound_state *cstate) 3515 { 3516 struct nfs4_client *clp = cstate->clp; 3517 struct nfs4_openowner *oo, *ret; 3518 3519 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 3520 if (!oo) 3521 return NULL; 3522 oo->oo_owner.so_ops = &openowner_ops; 3523 oo->oo_owner.so_is_open_owner = 1; 3524 oo->oo_owner.so_seqid = open->op_seqid; 3525 oo->oo_flags = 0; 3526 if (nfsd4_has_session(cstate)) 3527 oo->oo_flags |= NFS4_OO_CONFIRMED; 3528 oo->oo_time = 0; 3529 oo->oo_last_closed_stid = NULL; 3530 INIT_LIST_HEAD(&oo->oo_close_lru); 3531 spin_lock(&clp->cl_lock); 3532 ret = find_openstateowner_str_locked(strhashval, open, clp); 3533 if (ret == NULL) { 3534 hash_openowner(oo, clp, strhashval); 3535 ret = oo; 3536 } else 3537 nfs4_free_stateowner(&oo->oo_owner); 3538 3539 spin_unlock(&clp->cl_lock); 3540 return ret; 3541 } 3542 3543 static struct nfs4_ol_stateid * 3544 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) 3545 { 3546 3547 struct nfs4_openowner *oo = open->op_openowner; 3548 struct nfs4_ol_stateid *retstp = NULL; 3549 struct nfs4_ol_stateid *stp; 3550 3551 stp = open->op_stp; 3552 /* We are moving these outside of the spinlocks to avoid the warnings */ 3553 mutex_init(&stp->st_mutex); 3554 mutex_lock(&stp->st_mutex); 3555 3556 spin_lock(&oo->oo_owner.so_client->cl_lock); 3557 spin_lock(&fp->fi_lock); 3558 3559 retstp = nfsd4_find_existing_open(fp, open); 3560 if (retstp) 3561 goto out_unlock; 3562 3563 open->op_stp = NULL; 3564 atomic_inc(&stp->st_stid.sc_count); 3565 stp->st_stid.sc_type = NFS4_OPEN_STID; 3566 INIT_LIST_HEAD(&stp->st_locks); 3567 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 3568 get_nfs4_file(fp); 3569 stp->st_stid.sc_file = fp; 3570 stp->st_access_bmap = 0; 3571 stp->st_deny_bmap = 0; 3572 stp->st_openstp = NULL; 3573 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 3574 list_add(&stp->st_perfile, &fp->fi_stateids); 3575 3576 out_unlock: 3577 spin_unlock(&fp->fi_lock); 3578 spin_unlock(&oo->oo_owner.so_client->cl_lock); 3579 if (retstp) { 3580 mutex_lock(&retstp->st_mutex); 3581 /* To keep mutex tracking happy */ 3582 mutex_unlock(&stp->st_mutex); 3583 stp = retstp; 3584 } 3585 return stp; 3586 } 3587 3588 /* 3589 * In the 4.0 case we need to keep the owners around a little while to handle 3590 * CLOSE replay. We still do need to release any file access that is held by 3591 * them before returning however. 3592 */ 3593 static void 3594 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 3595 { 3596 struct nfs4_ol_stateid *last; 3597 struct nfs4_openowner *oo = openowner(s->st_stateowner); 3598 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 3599 nfsd_net_id); 3600 3601 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 3602 3603 /* 3604 * We know that we hold one reference via nfsd4_close, and another 3605 * "persistent" reference for the client. If the refcount is higher 3606 * than 2, then there are still calls in progress that are using this 3607 * stateid. We can't put the sc_file reference until they are finished. 3608 * Wait for the refcount to drop to 2. Since it has been unhashed, 3609 * there should be no danger of the refcount going back up again at 3610 * this point. 
3611 */ 3612 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2); 3613 3614 release_all_access(s); 3615 if (s->st_stid.sc_file) { 3616 put_nfs4_file(s->st_stid.sc_file); 3617 s->st_stid.sc_file = NULL; 3618 } 3619 3620 spin_lock(&nn->client_lock); 3621 last = oo->oo_last_closed_stid; 3622 oo->oo_last_closed_stid = s; 3623 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 3624 oo->oo_time = get_seconds(); 3625 spin_unlock(&nn->client_lock); 3626 if (last) 3627 nfs4_put_stid(&last->st_stid); 3628 } 3629 3630 /* search file_hashtbl[] for file */ 3631 static struct nfs4_file * 3632 find_file_locked(struct knfsd_fh *fh, unsigned int hashval) 3633 { 3634 struct nfs4_file *fp; 3635 3636 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) { 3637 if (fh_match(&fp->fi_fhandle, fh)) { 3638 if (atomic_inc_not_zero(&fp->fi_ref)) 3639 return fp; 3640 } 3641 } 3642 return NULL; 3643 } 3644 3645 struct nfs4_file * 3646 find_file(struct knfsd_fh *fh) 3647 { 3648 struct nfs4_file *fp; 3649 unsigned int hashval = file_hashval(fh); 3650 3651 rcu_read_lock(); 3652 fp = find_file_locked(fh, hashval); 3653 rcu_read_unlock(); 3654 return fp; 3655 } 3656 3657 static struct nfs4_file * 3658 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) 3659 { 3660 struct nfs4_file *fp; 3661 unsigned int hashval = file_hashval(fh); 3662 3663 rcu_read_lock(); 3664 fp = find_file_locked(fh, hashval); 3665 rcu_read_unlock(); 3666 if (fp) 3667 return fp; 3668 3669 spin_lock(&state_lock); 3670 fp = find_file_locked(fh, hashval); 3671 if (likely(fp == NULL)) { 3672 nfsd4_init_file(fh, hashval, new); 3673 fp = new; 3674 } 3675 spin_unlock(&state_lock); 3676 3677 return fp; 3678 } 3679 3680 /* 3681 * Called to check deny when READ with all zero stateid or 3682 * WRITE with all zero or all one stateid 3683 */ 3684 static __be32 3685 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 3686 { 3687 struct nfs4_file *fp; 3688 __be32 ret = nfs_ok; 3689 3690 fp = find_file(¤t_fh->fh_handle); 3691 if (!fp) 3692 return ret; 3693 /* Check for conflicting share reservations */ 3694 spin_lock(&fp->fi_lock); 3695 if (fp->fi_share_deny & deny_type) 3696 ret = nfserr_locked; 3697 spin_unlock(&fp->fi_lock); 3698 put_nfs4_file(fp); 3699 return ret; 3700 } 3701 3702 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 3703 { 3704 struct nfs4_delegation *dp = cb_to_delegation(cb); 3705 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 3706 nfsd_net_id); 3707 3708 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 3709 3710 /* 3711 * We can't do this in nfsd_break_deleg_cb because it is 3712 * already holding inode->i_lock. 3713 * 3714 * If the dl_time != 0, then we know that it has already been 3715 * queued for a lease break. Don't queue it again. 3716 */ 3717 spin_lock(&state_lock); 3718 if (dp->dl_time == 0) { 3719 dp->dl_time = get_seconds(); 3720 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 3721 } 3722 spin_unlock(&state_lock); 3723 } 3724 3725 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 3726 struct rpc_task *task) 3727 { 3728 struct nfs4_delegation *dp = cb_to_delegation(cb); 3729 3730 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID) 3731 return 1; 3732 3733 switch (task->tk_status) { 3734 case 0: 3735 return 1; 3736 case -EBADHANDLE: 3737 case -NFS4ERR_BAD_STATEID: 3738 /* 3739 * Race: client probably got cb_recall before open reply 3740 * granting delegation. 
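 * In that case the callback is retried a bounded number of times
 * (dl_retries), with a short delay between attempts, before the
 * recall is given up on.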
3741 */ 3742 if (dp->dl_retries--) { 3743 rpc_delay(task, 2 * HZ); 3744 return 0; 3745 } 3746 /*FALLTHRU*/ 3747 default: 3748 return -1; 3749 } 3750 } 3751 3752 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 3753 { 3754 struct nfs4_delegation *dp = cb_to_delegation(cb); 3755 3756 nfs4_put_stid(&dp->dl_stid); 3757 } 3758 3759 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 3760 .prepare = nfsd4_cb_recall_prepare, 3761 .done = nfsd4_cb_recall_done, 3762 .release = nfsd4_cb_recall_release, 3763 }; 3764 3765 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 3766 { 3767 /* 3768 * We're assuming the state code never drops its reference 3769 * without first removing the lease. Since we're in this lease 3770 * callback (and since the lease code is serialized by the kernel 3771 * lock) we know the server hasn't removed the lease yet, we know 3772 * it's safe to take a reference. 3773 */ 3774 atomic_inc(&dp->dl_stid.sc_count); 3775 nfsd4_run_cb(&dp->dl_recall); 3776 } 3777 3778 /* Called from break_lease() with i_lock held. */ 3779 static bool 3780 nfsd_break_deleg_cb(struct file_lock *fl) 3781 { 3782 bool ret = false; 3783 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; 3784 struct nfs4_delegation *dp; 3785 3786 if (!fp) { 3787 WARN(1, "(%p)->fl_owner NULL\n", fl); 3788 return ret; 3789 } 3790 if (fp->fi_had_conflict) { 3791 WARN(1, "duplicate break on %p\n", fp); 3792 return ret; 3793 } 3794 /* 3795 * We don't want the locks code to timeout the lease for us; 3796 * we'll remove it ourself if a delegation isn't returned 3797 * in time: 3798 */ 3799 fl->fl_break_time = 0; 3800 3801 spin_lock(&fp->fi_lock); 3802 fp->fi_had_conflict = true; 3803 /* 3804 * If there are no delegations on the list, then return true 3805 * so that the lease code will go ahead and delete it. 3806 */ 3807 if (list_empty(&fp->fi_delegations)) 3808 ret = true; 3809 else 3810 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 3811 nfsd_break_one_deleg(dp); 3812 spin_unlock(&fp->fi_lock); 3813 return ret; 3814 } 3815 3816 static int 3817 nfsd_change_deleg_cb(struct file_lock *onlist, int arg, 3818 struct list_head *dispose) 3819 { 3820 if (arg & F_UNLCK) 3821 return lease_modify(onlist, arg, dispose); 3822 else 3823 return -EAGAIN; 3824 } 3825 3826 static const struct lock_manager_operations nfsd_lease_mng_ops = { 3827 .lm_break = nfsd_break_deleg_cb, 3828 .lm_change = nfsd_change_deleg_cb, 3829 }; 3830 3831 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 3832 { 3833 if (nfsd4_has_session(cstate)) 3834 return nfs_ok; 3835 if (seqid == so->so_seqid - 1) 3836 return nfserr_replay_me; 3837 if (seqid == so->so_seqid) 3838 return nfs_ok; 3839 return nfserr_bad_seqid; 3840 } 3841 3842 static __be32 lookup_clientid(clientid_t *clid, 3843 struct nfsd4_compound_state *cstate, 3844 struct nfsd_net *nn) 3845 { 3846 struct nfs4_client *found; 3847 3848 if (cstate->clp) { 3849 found = cstate->clp; 3850 if (!same_clid(&found->cl_clientid, clid)) 3851 return nfserr_stale_clientid; 3852 return nfs_ok; 3853 } 3854 3855 if (STALE_CLIENTID(clid, nn)) 3856 return nfserr_stale_clientid; 3857 3858 /* 3859 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one 3860 * cached already then we know this is for is for v4.0 and "sessions" 3861 * will be false. 
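 * In that case, look the client up in the confirmed table, bump its
 * refcount under the client_lock, and cache it in cstate->clp so later
 * ops in the compound can reuse it.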
3862 */ 3863 WARN_ON_ONCE(cstate->session); 3864 spin_lock(&nn->client_lock); 3865 found = find_confirmed_client(clid, false, nn); 3866 if (!found) { 3867 spin_unlock(&nn->client_lock); 3868 return nfserr_expired; 3869 } 3870 atomic_inc(&found->cl_refcount); 3871 spin_unlock(&nn->client_lock); 3872 3873 /* Cache the nfs4_client in cstate! */ 3874 cstate->clp = found; 3875 return nfs_ok; 3876 } 3877 3878 __be32 3879 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 3880 struct nfsd4_open *open, struct nfsd_net *nn) 3881 { 3882 clientid_t *clientid = &open->op_clientid; 3883 struct nfs4_client *clp = NULL; 3884 unsigned int strhashval; 3885 struct nfs4_openowner *oo = NULL; 3886 __be32 status; 3887 3888 if (STALE_CLIENTID(&open->op_clientid, nn)) 3889 return nfserr_stale_clientid; 3890 /* 3891 * In case we need it later, after we've already created the 3892 * file and don't want to risk a further failure: 3893 */ 3894 open->op_file = nfsd4_alloc_file(); 3895 if (open->op_file == NULL) 3896 return nfserr_jukebox; 3897 3898 status = lookup_clientid(clientid, cstate, nn); 3899 if (status) 3900 return status; 3901 clp = cstate->clp; 3902 3903 strhashval = ownerstr_hashval(&open->op_owner); 3904 oo = find_openstateowner_str(strhashval, open, clp); 3905 open->op_openowner = oo; 3906 if (!oo) { 3907 goto new_owner; 3908 } 3909 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 3910 /* Replace unconfirmed owners without checking for replay. */ 3911 release_openowner(oo); 3912 open->op_openowner = NULL; 3913 goto new_owner; 3914 } 3915 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 3916 if (status) 3917 return status; 3918 goto alloc_stateid; 3919 new_owner: 3920 oo = alloc_init_open_stateowner(strhashval, open, cstate); 3921 if (oo == NULL) 3922 return nfserr_jukebox; 3923 open->op_openowner = oo; 3924 alloc_stateid: 3925 open->op_stp = nfs4_alloc_open_stateid(clp); 3926 if (!open->op_stp) 3927 return nfserr_jukebox; 3928 3929 if (nfsd4_has_session(cstate) && 3930 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 3931 open->op_odstate = alloc_clnt_odstate(clp); 3932 if (!open->op_odstate) 3933 return nfserr_jukebox; 3934 } 3935 3936 return nfs_ok; 3937 } 3938 3939 static inline __be32 3940 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 3941 { 3942 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 3943 return nfserr_openmode; 3944 else 3945 return nfs_ok; 3946 } 3947 3948 static int share_access_to_flags(u32 share_access) 3949 { 3950 return share_access == NFS4_SHARE_ACCESS_READ ? 
RD_STATE : WR_STATE; 3951 } 3952 3953 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 3954 { 3955 struct nfs4_stid *ret; 3956 3957 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); 3958 if (!ret) 3959 return NULL; 3960 return delegstateid(ret); 3961 } 3962 3963 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 3964 { 3965 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 3966 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 3967 } 3968 3969 static __be32 3970 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 3971 struct nfs4_delegation **dp) 3972 { 3973 int flags; 3974 __be32 status = nfserr_bad_stateid; 3975 struct nfs4_delegation *deleg; 3976 3977 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 3978 if (deleg == NULL) 3979 goto out; 3980 flags = share_access_to_flags(open->op_share_access); 3981 status = nfs4_check_delegmode(deleg, flags); 3982 if (status) { 3983 nfs4_put_stid(&deleg->dl_stid); 3984 goto out; 3985 } 3986 *dp = deleg; 3987 out: 3988 if (!nfsd4_is_deleg_cur(open)) 3989 return nfs_ok; 3990 if (status) 3991 return status; 3992 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 3993 return nfs_ok; 3994 } 3995 3996 static inline int nfs4_access_to_access(u32 nfs4_access) 3997 { 3998 int flags = 0; 3999 4000 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 4001 flags |= NFSD_MAY_READ; 4002 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 4003 flags |= NFSD_MAY_WRITE; 4004 return flags; 4005 } 4006 4007 static inline __be32 4008 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 4009 struct nfsd4_open *open) 4010 { 4011 struct iattr iattr = { 4012 .ia_valid = ATTR_SIZE, 4013 .ia_size = 0, 4014 }; 4015 if (!open->op_truncate) 4016 return 0; 4017 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 4018 return nfserr_inval; 4019 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 4020 } 4021 4022 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 4023 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 4024 struct nfsd4_open *open) 4025 { 4026 struct file *filp = NULL; 4027 __be32 status; 4028 int oflag = nfs4_access_to_omode(open->op_share_access); 4029 int access = nfs4_access_to_access(open->op_share_access); 4030 unsigned char old_access_bmap, old_deny_bmap; 4031 4032 spin_lock(&fp->fi_lock); 4033 4034 /* 4035 * Are we trying to set a deny mode that would conflict with 4036 * current access? 
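 *
 * For example, if some other open already holds NFS4_SHARE_ACCESS_READ
 * on this file and this OPEN requests NFS4_SHARE_DENY_READ, the new
 * deny mode conflicts with the existing access and
 * nfs4_file_check_deny() below fails the open with a share
 * reservation conflict.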
4037 */ 4038 status = nfs4_file_check_deny(fp, open->op_share_deny); 4039 if (status != nfs_ok) { 4040 spin_unlock(&fp->fi_lock); 4041 goto out; 4042 } 4043 4044 /* set access to the file */ 4045 status = nfs4_file_get_access(fp, open->op_share_access); 4046 if (status != nfs_ok) { 4047 spin_unlock(&fp->fi_lock); 4048 goto out; 4049 } 4050 4051 /* Set access bits in stateid */ 4052 old_access_bmap = stp->st_access_bmap; 4053 set_access(open->op_share_access, stp); 4054 4055 /* Set new deny mask */ 4056 old_deny_bmap = stp->st_deny_bmap; 4057 set_deny(open->op_share_deny, stp); 4058 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 4059 4060 if (!fp->fi_fds[oflag]) { 4061 spin_unlock(&fp->fi_lock); 4062 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp); 4063 if (status) 4064 goto out_put_access; 4065 spin_lock(&fp->fi_lock); 4066 if (!fp->fi_fds[oflag]) { 4067 fp->fi_fds[oflag] = filp; 4068 filp = NULL; 4069 } 4070 } 4071 spin_unlock(&fp->fi_lock); 4072 if (filp) 4073 fput(filp); 4074 4075 status = nfsd4_truncate(rqstp, cur_fh, open); 4076 if (status) 4077 goto out_put_access; 4078 out: 4079 return status; 4080 out_put_access: 4081 stp->st_access_bmap = old_access_bmap; 4082 nfs4_file_put_access(fp, open->op_share_access); 4083 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 4084 goto out; 4085 } 4086 4087 static __be32 4088 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 4089 { 4090 __be32 status; 4091 unsigned char old_deny_bmap = stp->st_deny_bmap; 4092 4093 if (!test_access(open->op_share_access, stp)) 4094 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open); 4095 4096 /* test and set deny mode */ 4097 spin_lock(&fp->fi_lock); 4098 status = nfs4_file_check_deny(fp, open->op_share_deny); 4099 if (status == nfs_ok) { 4100 set_deny(open->op_share_deny, stp); 4101 fp->fi_share_deny |= 4102 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 4103 } 4104 spin_unlock(&fp->fi_lock); 4105 4106 if (status != nfs_ok) 4107 return status; 4108 4109 status = nfsd4_truncate(rqstp, cur_fh, open); 4110 if (status != nfs_ok) 4111 reset_union_bmap_deny(old_deny_bmap, stp); 4112 return status; 4113 } 4114 4115 /* Should we give out recallable state?: */ 4116 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 4117 { 4118 if (clp->cl_cb_state == NFSD4_CB_UP) 4119 return true; 4120 /* 4121 * In the sessions case, since we don't have to establish a 4122 * separate connection for callbacks, we assume it's OK 4123 * until we hear otherwise: 4124 */ 4125 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 4126 } 4127 4128 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag) 4129 { 4130 struct file_lock *fl; 4131 4132 fl = locks_alloc_lock(); 4133 if (!fl) 4134 return NULL; 4135 fl->fl_lmops = &nfsd_lease_mng_ops; 4136 fl->fl_flags = FL_DELEG; 4137 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 4138 fl->fl_end = OFFSET_MAX; 4139 fl->fl_owner = (fl_owner_t)fp; 4140 fl->fl_pid = current->tgid; 4141 return fl; 4142 } 4143 4144 /** 4145 * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer 4146 * @dp: a pointer to the nfs4_delegation we're adding. 4147 * 4148 * Return: 4149 * On success: Return code will be 0 on success. 4150 * 4151 * On error: -EAGAIN if there was an existing delegation. 4152 * nonzero if there is an error in other cases. 
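 *
 * Note: vfs_setlease() may take ownership of the file_lock passed in
 * (clearing the pointer), which is why the code below frees the lock
 * only if the pointer is still non-NULL afterwards.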
4153 * 4154 */ 4155 4156 static int nfs4_setlease(struct nfs4_delegation *dp) 4157 { 4158 struct nfs4_file *fp = dp->dl_stid.sc_file; 4159 struct file_lock *fl; 4160 struct file *filp; 4161 int status = 0; 4162 4163 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ); 4164 if (!fl) 4165 return -ENOMEM; 4166 filp = find_readable_file(fp); 4167 if (!filp) { 4168 /* We should always have a readable file here */ 4169 WARN_ON_ONCE(1); 4170 locks_free_lock(fl); 4171 return -EBADF; 4172 } 4173 fl->fl_file = filp; 4174 status = vfs_setlease(filp, fl->fl_type, &fl, NULL); 4175 if (fl) 4176 locks_free_lock(fl); 4177 if (status) 4178 goto out_fput; 4179 spin_lock(&state_lock); 4180 spin_lock(&fp->fi_lock); 4181 /* Did the lease get broken before we took the lock? */ 4182 status = -EAGAIN; 4183 if (fp->fi_had_conflict) 4184 goto out_unlock; 4185 /* Race breaker */ 4186 if (fp->fi_deleg_file) { 4187 status = hash_delegation_locked(dp, fp); 4188 goto out_unlock; 4189 } 4190 fp->fi_deleg_file = filp; 4191 fp->fi_delegees = 0; 4192 status = hash_delegation_locked(dp, fp); 4193 spin_unlock(&fp->fi_lock); 4194 spin_unlock(&state_lock); 4195 if (status) { 4196 /* Should never happen, this is a new fi_deleg_file */ 4197 WARN_ON_ONCE(1); 4198 goto out_fput; 4199 } 4200 return 0; 4201 out_unlock: 4202 spin_unlock(&fp->fi_lock); 4203 spin_unlock(&state_lock); 4204 out_fput: 4205 fput(filp); 4206 return status; 4207 } 4208 4209 static struct nfs4_delegation * 4210 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, 4211 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) 4212 { 4213 int status; 4214 struct nfs4_delegation *dp; 4215 4216 if (fp->fi_had_conflict) 4217 return ERR_PTR(-EAGAIN); 4218 4219 spin_lock(&state_lock); 4220 spin_lock(&fp->fi_lock); 4221 status = nfs4_get_existing_delegation(clp, fp); 4222 spin_unlock(&fp->fi_lock); 4223 spin_unlock(&state_lock); 4224 4225 if (status) 4226 return ERR_PTR(status); 4227 4228 dp = alloc_init_deleg(clp, fh, odstate); 4229 if (!dp) 4230 return ERR_PTR(-ENOMEM); 4231 4232 get_nfs4_file(fp); 4233 spin_lock(&state_lock); 4234 spin_lock(&fp->fi_lock); 4235 dp->dl_stid.sc_file = fp; 4236 if (!fp->fi_deleg_file) { 4237 spin_unlock(&fp->fi_lock); 4238 spin_unlock(&state_lock); 4239 status = nfs4_setlease(dp); 4240 goto out; 4241 } 4242 if (fp->fi_had_conflict) { 4243 status = -EAGAIN; 4244 goto out_unlock; 4245 } 4246 status = hash_delegation_locked(dp, fp); 4247 out_unlock: 4248 spin_unlock(&fp->fi_lock); 4249 spin_unlock(&state_lock); 4250 out: 4251 if (status) { 4252 put_clnt_odstate(dp->dl_clnt_odstate); 4253 nfs4_put_stid(&dp->dl_stid); 4254 return ERR_PTR(status); 4255 } 4256 return dp; 4257 } 4258 4259 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 4260 { 4261 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4262 if (status == -EAGAIN) 4263 open->op_why_no_deleg = WND4_CONTENTION; 4264 else { 4265 open->op_why_no_deleg = WND4_RESOURCE; 4266 switch (open->op_deleg_want) { 4267 case NFS4_SHARE_WANT_READ_DELEG: 4268 case NFS4_SHARE_WANT_WRITE_DELEG: 4269 case NFS4_SHARE_WANT_ANY_DELEG: 4270 break; 4271 case NFS4_SHARE_WANT_CANCEL: 4272 open->op_why_no_deleg = WND4_CANCELLED; 4273 break; 4274 case NFS4_SHARE_WANT_NO_DELEG: 4275 WARN_ON_ONCE(1); 4276 } 4277 } 4278 } 4279 4280 /* 4281 * Attempt to hand out a delegation. 4282 * 4283 * Note we don't support write delegations, and won't until the vfs has 4284 * proper support for them. 
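 *
 * For ordinary opens (CLAIM_NULL/CLAIM_FH) a read delegation is only
 * handed out when the callback channel looks usable, the open owner is
 * confirmed, the grace period is over, and the open is not a write or
 * create open; CLAIM_PREVIOUS opens may instead reclaim a read
 * delegation they report having held before.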
4285 */ 4286 static void 4287 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, 4288 struct nfs4_ol_stateid *stp) 4289 { 4290 struct nfs4_delegation *dp; 4291 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 4292 struct nfs4_client *clp = stp->st_stid.sc_client; 4293 int cb_up; 4294 int status = 0; 4295 4296 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 4297 open->op_recall = 0; 4298 switch (open->op_claim_type) { 4299 case NFS4_OPEN_CLAIM_PREVIOUS: 4300 if (!cb_up) 4301 open->op_recall = 1; 4302 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ) 4303 goto out_no_deleg; 4304 break; 4305 case NFS4_OPEN_CLAIM_NULL: 4306 case NFS4_OPEN_CLAIM_FH: 4307 /* 4308 * Let's not give out any delegations till everyone's 4309 * had the chance to reclaim theirs, *and* until 4310 * NLM locks have all been reclaimed: 4311 */ 4312 if (locks_in_grace(clp->net)) 4313 goto out_no_deleg; 4314 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 4315 goto out_no_deleg; 4316 /* 4317 * Also, if the file was opened for write or 4318 * create, there's a good chance the client's 4319 * about to write to it, resulting in an 4320 * immediate recall (since we don't support 4321 * write delegations): 4322 */ 4323 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 4324 goto out_no_deleg; 4325 if (open->op_create == NFS4_OPEN_CREATE) 4326 goto out_no_deleg; 4327 break; 4328 default: 4329 goto out_no_deleg; 4330 } 4331 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); 4332 if (IS_ERR(dp)) 4333 goto out_no_deleg; 4334 4335 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 4336 4337 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", 4338 STATEID_VAL(&dp->dl_stid.sc_stateid)); 4339 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 4340 nfs4_put_stid(&dp->dl_stid); 4341 return; 4342 out_no_deleg: 4343 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 4344 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 4345 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { 4346 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 4347 open->op_recall = 1; 4348 } 4349 4350 /* 4.1 client asking for a delegation? */ 4351 if (open->op_deleg_want) 4352 nfsd4_open_deleg_none_ext(open, status); 4353 return; 4354 } 4355 4356 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 4357 struct nfs4_delegation *dp) 4358 { 4359 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 4360 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 4361 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4362 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 4363 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 4364 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 4365 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4366 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 4367 } 4368 /* Otherwise the client must be confused wanting a delegation 4369 * it already has, therefore we don't return 4370 * NFS4_OPEN_DELEGATE_NONE_EXT and reason. 
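 * For example, a client that already holds a read delegation and sends
 * NFS4_SHARE_WANT_READ_DELEG simply keeps NFS4_OPEN_DELEGATE_NONE in
 * the reply, with no why_no_deleg explanation.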
4371 */ 4372 } 4373 4374 __be32 4375 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 4376 { 4377 struct nfsd4_compoundres *resp = rqstp->rq_resp; 4378 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 4379 struct nfs4_file *fp = NULL; 4380 struct nfs4_ol_stateid *stp = NULL; 4381 struct nfs4_delegation *dp = NULL; 4382 __be32 status; 4383 4384 /* 4385 * Lookup file; if found, lookup stateid and check open request, 4386 * and check for delegations in the process of being recalled. 4387 * If not found, create the nfs4_file struct 4388 */ 4389 fp = find_or_add_file(open->op_file, ¤t_fh->fh_handle); 4390 if (fp != open->op_file) { 4391 status = nfs4_check_deleg(cl, open, &dp); 4392 if (status) 4393 goto out; 4394 spin_lock(&fp->fi_lock); 4395 stp = nfsd4_find_existing_open(fp, open); 4396 spin_unlock(&fp->fi_lock); 4397 } else { 4398 open->op_file = NULL; 4399 status = nfserr_bad_stateid; 4400 if (nfsd4_is_deleg_cur(open)) 4401 goto out; 4402 } 4403 4404 /* 4405 * OPEN the file, or upgrade an existing OPEN. 4406 * If truncate fails, the OPEN fails. 4407 */ 4408 if (stp) { 4409 /* Stateid was found, this is an OPEN upgrade */ 4410 mutex_lock(&stp->st_mutex); 4411 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 4412 if (status) { 4413 mutex_unlock(&stp->st_mutex); 4414 goto out; 4415 } 4416 } else { 4417 /* stp is returned locked. */ 4418 stp = init_open_stateid(fp, open); 4419 /* See if we lost the race to some other thread */ 4420 if (stp->st_access_bmap != 0) { 4421 status = nfs4_upgrade_open(rqstp, fp, current_fh, 4422 stp, open); 4423 if (status) { 4424 mutex_unlock(&stp->st_mutex); 4425 goto out; 4426 } 4427 goto upgrade_out; 4428 } 4429 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); 4430 if (status) { 4431 mutex_unlock(&stp->st_mutex); 4432 release_open_stateid(stp); 4433 goto out; 4434 } 4435 4436 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 4437 open->op_odstate); 4438 if (stp->st_clnt_odstate == open->op_odstate) 4439 open->op_odstate = NULL; 4440 } 4441 upgrade_out: 4442 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 4443 mutex_unlock(&stp->st_mutex); 4444 4445 if (nfsd4_has_session(&resp->cstate)) { 4446 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 4447 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4448 open->op_why_no_deleg = WND4_NOT_WANTED; 4449 goto nodeleg; 4450 } 4451 } 4452 4453 /* 4454 * Attempt to hand out a delegation. No error return, because the 4455 * OPEN succeeds even if we fail. 4456 */ 4457 nfs4_open_delegation(current_fh, open, stp); 4458 nodeleg: 4459 status = nfs_ok; 4460 4461 dprintk("%s: stateid=" STATEID_FMT "\n", __func__, 4462 STATEID_VAL(&stp->st_stid.sc_stateid)); 4463 out: 4464 /* 4.1 client trying to upgrade/downgrade delegation? */ 4465 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 4466 open->op_deleg_want) 4467 nfsd4_deleg_xgrade_none_ext(open, dp); 4468 4469 if (fp) 4470 put_nfs4_file(fp); 4471 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 4472 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 4473 /* 4474 * To finish the open response, we just need to set the rflags. 
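 * NFS4_OPEN_RESULT_CONFIRM tells a v4.0 client that it must send
 * OPEN_CONFIRM before the stateid is usable; sessions clients never
 * need that, and are instead told that the server may send
 * CB_NOTIFY_LOCK callbacks.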
4475 */ 4476 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 4477 if (nfsd4_has_session(&resp->cstate)) 4478 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; 4479 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) 4480 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 4481 4482 if (dp) 4483 nfs4_put_stid(&dp->dl_stid); 4484 if (stp) 4485 nfs4_put_stid(&stp->st_stid); 4486 4487 return status; 4488 } 4489 4490 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 4491 struct nfsd4_open *open) 4492 { 4493 if (open->op_openowner) { 4494 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; 4495 4496 nfsd4_cstate_assign_replay(cstate, so); 4497 nfs4_put_stateowner(so); 4498 } 4499 if (open->op_file) 4500 kmem_cache_free(file_slab, open->op_file); 4501 if (open->op_stp) 4502 nfs4_put_stid(&open->op_stp->st_stid); 4503 if (open->op_odstate) 4504 kmem_cache_free(odstate_slab, open->op_odstate); 4505 } 4506 4507 __be32 4508 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4509 clientid_t *clid) 4510 { 4511 struct nfs4_client *clp; 4512 __be32 status; 4513 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4514 4515 dprintk("process_renew(%08x/%08x): starting\n", 4516 clid->cl_boot, clid->cl_id); 4517 status = lookup_clientid(clid, cstate, nn); 4518 if (status) 4519 goto out; 4520 clp = cstate->clp; 4521 status = nfserr_cb_path_down; 4522 if (!list_empty(&clp->cl_delegations) 4523 && clp->cl_cb_state != NFSD4_CB_UP) 4524 goto out; 4525 status = nfs_ok; 4526 out: 4527 return status; 4528 } 4529 4530 void 4531 nfsd4_end_grace(struct nfsd_net *nn) 4532 { 4533 /* do nothing if grace period already ended */ 4534 if (nn->grace_ended) 4535 return; 4536 4537 dprintk("NFSD: end of grace period\n"); 4538 nn->grace_ended = true; 4539 /* 4540 * If the server goes down again right now, an NFSv4 4541 * client will still be allowed to reclaim after it comes back up, 4542 * even if it hasn't yet had a chance to reclaim state this time. 4543 * 4544 */ 4545 nfsd4_record_grace_done(nn); 4546 /* 4547 * At this point, NFSv4 clients can still reclaim. But if the 4548 * server crashes, any that have not yet reclaimed will be out 4549 * of luck on the next boot. 4550 * 4551 * (NFSv4.1+ clients are considered to have reclaimed once they 4552 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to 4553 * have reclaimed after their first OPEN.) 4554 */ 4555 locks_end_grace(&nn->nfsd4_manager); 4556 /* 4557 * At this point, and once lockd and/or any other containers 4558 * exit their grace period, further reclaims will fail and 4559 * regular locking can resume. 
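 *
 * In practice this is driven from nfs4_laundromat() below; the
 * nn->grace_ended check at the top makes repeated calls harmless.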
4560 */ 4561 } 4562 4563 static time_t 4564 nfs4_laundromat(struct nfsd_net *nn) 4565 { 4566 struct nfs4_client *clp; 4567 struct nfs4_openowner *oo; 4568 struct nfs4_delegation *dp; 4569 struct nfs4_ol_stateid *stp; 4570 struct nfsd4_blocked_lock *nbl; 4571 struct list_head *pos, *next, reaplist; 4572 time_t cutoff = get_seconds() - nn->nfsd4_lease; 4573 time_t t, new_timeo = nn->nfsd4_lease; 4574 4575 dprintk("NFSD: laundromat service - starting\n"); 4576 nfsd4_end_grace(nn); 4577 INIT_LIST_HEAD(&reaplist); 4578 spin_lock(&nn->client_lock); 4579 list_for_each_safe(pos, next, &nn->client_lru) { 4580 clp = list_entry(pos, struct nfs4_client, cl_lru); 4581 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 4582 t = clp->cl_time - cutoff; 4583 new_timeo = min(new_timeo, t); 4584 break; 4585 } 4586 if (mark_client_expired_locked(clp)) { 4587 dprintk("NFSD: client in use (clientid %08x)\n", 4588 clp->cl_clientid.cl_id); 4589 continue; 4590 } 4591 list_add(&clp->cl_lru, &reaplist); 4592 } 4593 spin_unlock(&nn->client_lock); 4594 list_for_each_safe(pos, next, &reaplist) { 4595 clp = list_entry(pos, struct nfs4_client, cl_lru); 4596 dprintk("NFSD: purging unused client (clientid %08x)\n", 4597 clp->cl_clientid.cl_id); 4598 list_del_init(&clp->cl_lru); 4599 expire_client(clp); 4600 } 4601 spin_lock(&state_lock); 4602 list_for_each_safe(pos, next, &nn->del_recall_lru) { 4603 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 4604 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 4605 t = dp->dl_time - cutoff; 4606 new_timeo = min(new_timeo, t); 4607 break; 4608 } 4609 WARN_ON(!unhash_delegation_locked(dp)); 4610 list_add(&dp->dl_recall_lru, &reaplist); 4611 } 4612 spin_unlock(&state_lock); 4613 while (!list_empty(&reaplist)) { 4614 dp = list_first_entry(&reaplist, struct nfs4_delegation, 4615 dl_recall_lru); 4616 list_del_init(&dp->dl_recall_lru); 4617 revoke_delegation(dp); 4618 } 4619 4620 spin_lock(&nn->client_lock); 4621 while (!list_empty(&nn->close_lru)) { 4622 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 4623 oo_close_lru); 4624 if (time_after((unsigned long)oo->oo_time, 4625 (unsigned long)cutoff)) { 4626 t = oo->oo_time - cutoff; 4627 new_timeo = min(new_timeo, t); 4628 break; 4629 } 4630 list_del_init(&oo->oo_close_lru); 4631 stp = oo->oo_last_closed_stid; 4632 oo->oo_last_closed_stid = NULL; 4633 spin_unlock(&nn->client_lock); 4634 nfs4_put_stid(&stp->st_stid); 4635 spin_lock(&nn->client_lock); 4636 } 4637 spin_unlock(&nn->client_lock); 4638 4639 /* 4640 * It's possible for a client to try and acquire an already held lock 4641 * that is being held for a long time, and then lose interest in it. 4642 * So, we clean out any un-revisited request after a lease period 4643 * under the assumption that the client is no longer interested. 4644 * 4645 * RFC5661, sec. 9.6 states that the client must not rely on getting 4646 * notifications and must continue to poll for locks, even when the 4647 * server supports them. Thus this shouldn't lead to clients blocking 4648 * indefinitely once the lock does become free. 
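 *
 * The reaping below follows the usual two-phase pattern: expired
 * entries are first moved to a private reaplist while holding
 * blocked_locks_lock, and are then unblocked and freed with the lock
 * dropped, roughly:
 *
 *     spin_lock(&nn->blocked_locks_lock);
 *     ... move timed-out entries from blocked_locks_lru to reaplist ...
 *     spin_unlock(&nn->blocked_locks_lock);
 *     ... posix_unblock_lock() and free_blocked_lock() each entry ...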
4649  */
4650     BUG_ON(!list_empty(&reaplist));
4651     spin_lock(&nn->blocked_locks_lock);
4652     while (!list_empty(&nn->blocked_locks_lru)) {
4653         nbl = list_first_entry(&nn->blocked_locks_lru,
4654                     struct nfsd4_blocked_lock, nbl_lru);
4655         if (time_after((unsigned long)nbl->nbl_time,
4656                 (unsigned long)cutoff)) {
4657             t = nbl->nbl_time - cutoff;
4658             new_timeo = min(new_timeo, t);
4659             break;
4660         }
4661         list_move(&nbl->nbl_lru, &reaplist);
4662         list_del_init(&nbl->nbl_list);
4663     }
4664     spin_unlock(&nn->blocked_locks_lock);
4665 
4666     while (!list_empty(&reaplist)) {
4667         nbl = list_first_entry(&reaplist,
4668                     struct nfsd4_blocked_lock, nbl_lru);
4669         list_del_init(&nbl->nbl_lru);
4670         posix_unblock_lock(&nbl->nbl_lock);
4671         free_blocked_lock(nbl);
4672     }
4673 
4674     new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4675     return new_timeo;
4676 }
4677 
4678 static struct workqueue_struct *laundry_wq;
4679 static void laundromat_main(struct work_struct *);
4680 
4681 static void
4682 laundromat_main(struct work_struct *laundry)
4683 {
4684     time_t t;
4685     struct delayed_work *dwork = to_delayed_work(laundry);
4686     struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4687                        laundromat_work);
4688 
4689     t = nfs4_laundromat(nn);
4690     dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4691     queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4692 }
4693 
4694 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4695 {
4696     if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4697         return nfserr_bad_stateid;
4698     return nfs_ok;
4699 }
4700 
4701 static inline int
4702 access_permit_read(struct nfs4_ol_stateid *stp)
4703 {
4704     return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4705         test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4706         test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4707 }
4708 
4709 static inline int
4710 access_permit_write(struct nfs4_ol_stateid *stp)
4711 {
4712     return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4713         test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4714 }
4715 
4716 static
4717 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4718 {
4719     __be32 status = nfserr_openmode;
4720 
4721     /* For lock stateids, we test the parent open, not the lock: */
4722     if (stp->st_openstp)
4723         stp = stp->st_openstp;
4724     if ((flags & WR_STATE) && !access_permit_write(stp))
4725         goto out;
4726     if ((flags & RD_STATE) && !access_permit_read(stp))
4727         goto out;
4728     status = nfs_ok;
4729 out:
4730     return status;
4731 }
4732 
4733 static inline __be32
4734 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4735 {
4736     if (ONE_STATEID(stateid) && (flags & RD_STATE))
4737         return nfs_ok;
4738     else if (opens_in_grace(net)) {
4739         /* Answer in remaining cases depends on existence of
4740          * conflicting state; so we must wait out the grace period. */
4741         return nfserr_grace;
4742     } else if (flags & WR_STATE)
4743         return nfs4_share_conflict(current_fh,
4744                 NFS4_SHARE_DENY_WRITE);
4745     else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4746         return nfs4_share_conflict(current_fh,
4747                 NFS4_SHARE_DENY_READ);
4748 }
4749 
4750 /*
4751  * Allow READ/WRITE during grace period on recovered state only for files
4752  * that are not able to provide mandatory locking.
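 * (mandatory_lock() is true only for files on "mand" mounts whose mode
 * has the setgid bit set but group execute clear, so for most files
 * this never gets in the way of I/O.)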
4753 */ 4754 static inline int 4755 grace_disallows_io(struct net *net, struct inode *inode) 4756 { 4757 return opens_in_grace(net) && mandatory_lock(inode); 4758 } 4759 4760 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 4761 { 4762 /* 4763 * When sessions are used the stateid generation number is ignored 4764 * when it is zero. 4765 */ 4766 if (has_session && in->si_generation == 0) 4767 return nfs_ok; 4768 4769 if (in->si_generation == ref->si_generation) 4770 return nfs_ok; 4771 4772 /* If the client sends us a stateid from the future, it's buggy: */ 4773 if (nfsd4_stateid_generation_after(in, ref)) 4774 return nfserr_bad_stateid; 4775 /* 4776 * However, we could see a stateid from the past, even from a 4777 * non-buggy client. For example, if the client sends a lock 4778 * while some IO is outstanding, the lock may bump si_generation 4779 * while the IO is still in flight. The client could avoid that 4780 * situation by waiting for responses on all the IO requests, 4781 * but better performance may result in retrying IO that 4782 * receives an old_stateid error if requests are rarely 4783 * reordered in flight: 4784 */ 4785 return nfserr_old_stateid; 4786 } 4787 4788 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 4789 { 4790 if (ols->st_stateowner->so_is_open_owner && 4791 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 4792 return nfserr_bad_stateid; 4793 return nfs_ok; 4794 } 4795 4796 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 4797 { 4798 struct nfs4_stid *s; 4799 __be32 status = nfserr_bad_stateid; 4800 4801 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4802 return status; 4803 /* Client debugging aid. */ 4804 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { 4805 char addr_str[INET6_ADDRSTRLEN]; 4806 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str, 4807 sizeof(addr_str)); 4808 pr_warn_ratelimited("NFSD: client %s testing state ID " 4809 "with incorrect client ID\n", addr_str); 4810 return status; 4811 } 4812 spin_lock(&cl->cl_lock); 4813 s = find_stateid_locked(cl, stateid); 4814 if (!s) 4815 goto out_unlock; 4816 status = check_stateid_generation(stateid, &s->sc_stateid, 1); 4817 if (status) 4818 goto out_unlock; 4819 switch (s->sc_type) { 4820 case NFS4_DELEG_STID: 4821 status = nfs_ok; 4822 break; 4823 case NFS4_REVOKED_DELEG_STID: 4824 status = nfserr_deleg_revoked; 4825 break; 4826 case NFS4_OPEN_STID: 4827 case NFS4_LOCK_STID: 4828 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 4829 break; 4830 default: 4831 printk("unknown stateid type %x\n", s->sc_type); 4832 /* Fallthrough */ 4833 case NFS4_CLOSED_STID: 4834 case NFS4_CLOSED_DELEG_STID: 4835 status = nfserr_bad_stateid; 4836 } 4837 out_unlock: 4838 spin_unlock(&cl->cl_lock); 4839 return status; 4840 } 4841 4842 __be32 4843 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 4844 stateid_t *stateid, unsigned char typemask, 4845 struct nfs4_stid **s, struct nfsd_net *nn) 4846 { 4847 __be32 status; 4848 4849 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4850 return nfserr_bad_stateid; 4851 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); 4852 if (status == nfserr_stale_clientid) { 4853 if (cstate->session) 4854 return nfserr_bad_stateid; 4855 return nfserr_stale_stateid; 4856 } 4857 if (status) 4858 return status; 4859 *s = find_stateid_by_type(cstate->clp, stateid, typemask); 4860 if (!*s) 4861 return nfserr_bad_stateid; 4862 return 
nfs_ok; 4863 } 4864 4865 static struct file * 4866 nfs4_find_file(struct nfs4_stid *s, int flags) 4867 { 4868 if (!s) 4869 return NULL; 4870 4871 switch (s->sc_type) { 4872 case NFS4_DELEG_STID: 4873 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file)) 4874 return NULL; 4875 return get_file(s->sc_file->fi_deleg_file); 4876 case NFS4_OPEN_STID: 4877 case NFS4_LOCK_STID: 4878 if (flags & RD_STATE) 4879 return find_readable_file(s->sc_file); 4880 else 4881 return find_writeable_file(s->sc_file); 4882 break; 4883 } 4884 4885 return NULL; 4886 } 4887 4888 static __be32 4889 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags) 4890 { 4891 __be32 status; 4892 4893 status = nfsd4_check_openowner_confirmed(ols); 4894 if (status) 4895 return status; 4896 return nfs4_check_openmode(ols, flags); 4897 } 4898 4899 static __be32 4900 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, 4901 struct file **filpp, bool *tmp_file, int flags) 4902 { 4903 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE; 4904 struct file *file; 4905 __be32 status; 4906 4907 file = nfs4_find_file(s, flags); 4908 if (file) { 4909 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, 4910 acc | NFSD_MAY_OWNER_OVERRIDE); 4911 if (status) { 4912 fput(file); 4913 return status; 4914 } 4915 4916 *filpp = file; 4917 } else { 4918 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp); 4919 if (status) 4920 return status; 4921 4922 if (tmp_file) 4923 *tmp_file = true; 4924 } 4925 4926 return 0; 4927 } 4928 4929 /* 4930 * Checks for stateid operations 4931 */ 4932 __be32 4933 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 4934 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 4935 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file) 4936 { 4937 struct inode *ino = d_inode(fhp->fh_dentry); 4938 struct net *net = SVC_NET(rqstp); 4939 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4940 struct nfs4_stid *s = NULL; 4941 __be32 status; 4942 4943 if (filpp) 4944 *filpp = NULL; 4945 if (tmp_file) 4946 *tmp_file = false; 4947 4948 if (grace_disallows_io(net, ino)) 4949 return nfserr_grace; 4950 4951 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 4952 status = check_special_stateids(net, fhp, stateid, flags); 4953 goto done; 4954 } 4955 4956 status = nfsd4_lookup_stateid(cstate, stateid, 4957 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 4958 &s, nn); 4959 if (status) 4960 return status; 4961 status = check_stateid_generation(stateid, &s->sc_stateid, 4962 nfsd4_has_session(cstate)); 4963 if (status) 4964 goto out; 4965 4966 switch (s->sc_type) { 4967 case NFS4_DELEG_STID: 4968 status = nfs4_check_delegmode(delegstateid(s), flags); 4969 break; 4970 case NFS4_OPEN_STID: 4971 case NFS4_LOCK_STID: 4972 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags); 4973 break; 4974 default: 4975 status = nfserr_bad_stateid; 4976 break; 4977 } 4978 if (status) 4979 goto out; 4980 status = nfs4_check_fh(fhp, s); 4981 4982 done: 4983 if (!status && filpp) 4984 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags); 4985 out: 4986 if (s) 4987 nfs4_put_stid(s); 4988 return status; 4989 } 4990 4991 /* 4992 * Test if the stateid is valid 4993 */ 4994 __be32 4995 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4996 struct nfsd4_test_stateid *test_stateid) 4997 { 4998 struct nfsd4_test_stateid_id *stateid; 4999 struct nfs4_client *cl = cstate->session->se_client; 5000 5001 list_for_each_entry(stateid, 
&test_stateid->ts_stateid_list, ts_id_list) 5002 stateid->ts_id_status = 5003 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 5004 5005 return nfs_ok; 5006 } 5007 5008 static __be32 5009 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 5010 { 5011 struct nfs4_ol_stateid *stp = openlockstateid(s); 5012 __be32 ret; 5013 5014 mutex_lock(&stp->st_mutex); 5015 5016 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 5017 if (ret) 5018 goto out; 5019 5020 ret = nfserr_locks_held; 5021 if (check_for_locks(stp->st_stid.sc_file, 5022 lockowner(stp->st_stateowner))) 5023 goto out; 5024 5025 release_lock_stateid(stp); 5026 ret = nfs_ok; 5027 5028 out: 5029 mutex_unlock(&stp->st_mutex); 5030 nfs4_put_stid(s); 5031 return ret; 5032 } 5033 5034 __be32 5035 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5036 struct nfsd4_free_stateid *free_stateid) 5037 { 5038 stateid_t *stateid = &free_stateid->fr_stateid; 5039 struct nfs4_stid *s; 5040 struct nfs4_delegation *dp; 5041 struct nfs4_client *cl = cstate->session->se_client; 5042 __be32 ret = nfserr_bad_stateid; 5043 5044 spin_lock(&cl->cl_lock); 5045 s = find_stateid_locked(cl, stateid); 5046 if (!s) 5047 goto out_unlock; 5048 switch (s->sc_type) { 5049 case NFS4_DELEG_STID: 5050 ret = nfserr_locks_held; 5051 break; 5052 case NFS4_OPEN_STID: 5053 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 5054 if (ret) 5055 break; 5056 ret = nfserr_locks_held; 5057 break; 5058 case NFS4_LOCK_STID: 5059 atomic_inc(&s->sc_count); 5060 spin_unlock(&cl->cl_lock); 5061 ret = nfsd4_free_lock_stateid(stateid, s); 5062 goto out; 5063 case NFS4_REVOKED_DELEG_STID: 5064 dp = delegstateid(s); 5065 list_del_init(&dp->dl_recall_lru); 5066 spin_unlock(&cl->cl_lock); 5067 nfs4_put_stid(s); 5068 ret = nfs_ok; 5069 goto out; 5070 /* Default falls through and returns nfserr_bad_stateid */ 5071 } 5072 out_unlock: 5073 spin_unlock(&cl->cl_lock); 5074 out: 5075 return ret; 5076 } 5077 5078 static inline int 5079 setlkflg (int type) 5080 { 5081 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 5082 RD_STATE : WR_STATE; 5083 } 5084 5085 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 5086 { 5087 struct svc_fh *current_fh = &cstate->current_fh; 5088 struct nfs4_stateowner *sop = stp->st_stateowner; 5089 __be32 status; 5090 5091 status = nfsd4_check_seqid(cstate, sop, seqid); 5092 if (status) 5093 return status; 5094 if (stp->st_stid.sc_type == NFS4_CLOSED_STID 5095 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) 5096 /* 5097 * "Closed" stateid's exist *only* to return 5098 * nfserr_replay_me from the previous step, and 5099 * revoked delegations are kept only for free_stateid. 5100 */ 5101 return nfserr_bad_stateid; 5102 mutex_lock(&stp->st_mutex); 5103 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 5104 if (status == nfs_ok) 5105 status = nfs4_check_fh(current_fh, &stp->st_stid); 5106 if (status != nfs_ok) 5107 mutex_unlock(&stp->st_mutex); 5108 return status; 5109 } 5110 5111 /* 5112 * Checks for sequence id mutating operations. 
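 *
 * On success the stateid is returned with st_mutex held and a
 * reference taken; callers are expected to follow a pattern like
 * (compare nfsd4_open_confirm() below):
 *
 *     status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
 *                     NFS4_OPEN_STID, &stp, nn);
 *     if (status)
 *             return status;
 *     ... update the stateid ...
 *     mutex_unlock(&stp->st_mutex);
 *     nfs4_put_stid(&stp->st_stid);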
5113 */ 5114 static __be32 5115 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 5116 stateid_t *stateid, char typemask, 5117 struct nfs4_ol_stateid **stpp, 5118 struct nfsd_net *nn) 5119 { 5120 __be32 status; 5121 struct nfs4_stid *s; 5122 struct nfs4_ol_stateid *stp = NULL; 5123 5124 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, 5125 seqid, STATEID_VAL(stateid)); 5126 5127 *stpp = NULL; 5128 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); 5129 if (status) 5130 return status; 5131 stp = openlockstateid(s); 5132 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); 5133 5134 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 5135 if (!status) 5136 *stpp = stp; 5137 else 5138 nfs4_put_stid(&stp->st_stid); 5139 return status; 5140 } 5141 5142 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 5143 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 5144 { 5145 __be32 status; 5146 struct nfs4_openowner *oo; 5147 struct nfs4_ol_stateid *stp; 5148 5149 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 5150 NFS4_OPEN_STID, &stp, nn); 5151 if (status) 5152 return status; 5153 oo = openowner(stp->st_stateowner); 5154 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 5155 mutex_unlock(&stp->st_mutex); 5156 nfs4_put_stid(&stp->st_stid); 5157 return nfserr_bad_stateid; 5158 } 5159 *stpp = stp; 5160 return nfs_ok; 5161 } 5162 5163 __be32 5164 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5165 struct nfsd4_open_confirm *oc) 5166 { 5167 __be32 status; 5168 struct nfs4_openowner *oo; 5169 struct nfs4_ol_stateid *stp; 5170 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5171 5172 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 5173 cstate->current_fh.fh_dentry); 5174 5175 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 5176 if (status) 5177 return status; 5178 5179 status = nfs4_preprocess_seqid_op(cstate, 5180 oc->oc_seqid, &oc->oc_req_stateid, 5181 NFS4_OPEN_STID, &stp, nn); 5182 if (status) 5183 goto out; 5184 oo = openowner(stp->st_stateowner); 5185 status = nfserr_bad_stateid; 5186 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 5187 mutex_unlock(&stp->st_mutex); 5188 goto put_stateid; 5189 } 5190 oo->oo_flags |= NFS4_OO_CONFIRMED; 5191 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 5192 mutex_unlock(&stp->st_mutex); 5193 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 5194 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 5195 5196 nfsd4_client_record_create(oo->oo_owner.so_client); 5197 status = nfs_ok; 5198 put_stateid: 5199 nfs4_put_stid(&stp->st_stid); 5200 out: 5201 nfsd4_bump_seqid(cstate, status); 5202 return status; 5203 } 5204 5205 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 5206 { 5207 if (!test_access(access, stp)) 5208 return; 5209 nfs4_file_put_access(stp->st_stid.sc_file, access); 5210 clear_access(access, stp); 5211 } 5212 5213 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 5214 { 5215 switch (to_access) { 5216 case NFS4_SHARE_ACCESS_READ: 5217 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 5218 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 5219 break; 5220 case NFS4_SHARE_ACCESS_WRITE: 5221 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 5222 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 5223 break; 5224 case 
NFS4_SHARE_ACCESS_BOTH: 5225 break; 5226 default: 5227 WARN_ON_ONCE(1); 5228 } 5229 } 5230 5231 __be32 5232 nfsd4_open_downgrade(struct svc_rqst *rqstp, 5233 struct nfsd4_compound_state *cstate, 5234 struct nfsd4_open_downgrade *od) 5235 { 5236 __be32 status; 5237 struct nfs4_ol_stateid *stp; 5238 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5239 5240 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 5241 cstate->current_fh.fh_dentry); 5242 5243 /* We don't yet support WANT bits: */ 5244 if (od->od_deleg_want) 5245 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 5246 od->od_deleg_want); 5247 5248 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 5249 &od->od_stateid, &stp, nn); 5250 if (status) 5251 goto out; 5252 status = nfserr_inval; 5253 if (!test_access(od->od_share_access, stp)) { 5254 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 5255 stp->st_access_bmap, od->od_share_access); 5256 goto put_stateid; 5257 } 5258 if (!test_deny(od->od_share_deny, stp)) { 5259 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 5260 stp->st_deny_bmap, od->od_share_deny); 5261 goto put_stateid; 5262 } 5263 nfs4_stateid_downgrade(stp, od->od_share_access); 5264 reset_union_bmap_deny(od->od_share_deny, stp); 5265 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 5266 status = nfs_ok; 5267 put_stateid: 5268 mutex_unlock(&stp->st_mutex); 5269 nfs4_put_stid(&stp->st_stid); 5270 out: 5271 nfsd4_bump_seqid(cstate, status); 5272 return status; 5273 } 5274 5275 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 5276 { 5277 struct nfs4_client *clp = s->st_stid.sc_client; 5278 bool unhashed; 5279 LIST_HEAD(reaplist); 5280 5281 s->st_stid.sc_type = NFS4_CLOSED_STID; 5282 spin_lock(&clp->cl_lock); 5283 unhashed = unhash_open_stateid(s, &reaplist); 5284 5285 if (clp->cl_minorversion) { 5286 if (unhashed) 5287 put_ol_stateid_locked(s, &reaplist); 5288 spin_unlock(&clp->cl_lock); 5289 free_ol_stateid_reaplist(&reaplist); 5290 } else { 5291 spin_unlock(&clp->cl_lock); 5292 free_ol_stateid_reaplist(&reaplist); 5293 if (unhashed) 5294 move_to_close_lru(s, clp->net); 5295 } 5296 } 5297 5298 /* 5299 * nfs4_unlock_state() called after encode 5300 */ 5301 __be32 5302 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5303 struct nfsd4_close *close) 5304 { 5305 __be32 status; 5306 struct nfs4_ol_stateid *stp; 5307 struct net *net = SVC_NET(rqstp); 5308 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5309 5310 dprintk("NFSD: nfsd4_close on file %pd\n", 5311 cstate->current_fh.fh_dentry); 5312 5313 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 5314 &close->cl_stateid, 5315 NFS4_OPEN_STID|NFS4_CLOSED_STID, 5316 &stp, nn); 5317 nfsd4_bump_seqid(cstate, status); 5318 if (status) 5319 goto out; 5320 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 5321 mutex_unlock(&stp->st_mutex); 5322 5323 nfsd4_close_open_stateid(stp); 5324 5325 /* put reference from nfs4_preprocess_seqid_op */ 5326 nfs4_put_stid(&stp->st_stid); 5327 out: 5328 return status; 5329 } 5330 5331 __be32 5332 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5333 struct nfsd4_delegreturn *dr) 5334 { 5335 struct nfs4_delegation *dp; 5336 stateid_t *stateid = &dr->dr_stateid; 5337 struct nfs4_stid *s; 5338 __be32 status; 5339 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5340 5341 if ((status = fh_verify(rqstp, &cstate->current_fh, 
S_IFREG, 0))) 5342 return status; 5343 5344 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); 5345 if (status) 5346 goto out; 5347 dp = delegstateid(s); 5348 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); 5349 if (status) 5350 goto put_stateid; 5351 5352 destroy_delegation(dp); 5353 put_stateid: 5354 nfs4_put_stid(&dp->dl_stid); 5355 out: 5356 return status; 5357 } 5358 5359 static inline u64 5360 end_offset(u64 start, u64 len) 5361 { 5362 u64 end; 5363 5364 end = start + len; 5365 return end >= start ? end: NFS4_MAX_UINT64; 5366 } 5367 5368 /* last octet in a range */ 5369 static inline u64 5370 last_byte_offset(u64 start, u64 len) 5371 { 5372 u64 end; 5373 5374 WARN_ON_ONCE(!len); 5375 end = start + len; 5376 return end > start ? end - 1: NFS4_MAX_UINT64; 5377 } 5378 5379 /* 5380 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 5381 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 5382 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 5383 * locking, this prevents us from being completely protocol-compliant. The 5384 * real solution to this problem is to start using unsigned file offsets in 5385 * the VFS, but this is a very deep change! 5386 */ 5387 static inline void 5388 nfs4_transform_lock_offset(struct file_lock *lock) 5389 { 5390 if (lock->fl_start < 0) 5391 lock->fl_start = OFFSET_MAX; 5392 if (lock->fl_end < 0) 5393 lock->fl_end = OFFSET_MAX; 5394 } 5395 5396 static fl_owner_t 5397 nfsd4_fl_get_owner(fl_owner_t owner) 5398 { 5399 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 5400 5401 nfs4_get_stateowner(&lo->lo_owner); 5402 return owner; 5403 } 5404 5405 static void 5406 nfsd4_fl_put_owner(fl_owner_t owner) 5407 { 5408 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 5409 5410 if (lo) 5411 nfs4_put_stateowner(&lo->lo_owner); 5412 } 5413 5414 static void 5415 nfsd4_lm_notify(struct file_lock *fl) 5416 { 5417 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; 5418 struct net *net = lo->lo_owner.so_client->net; 5419 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5420 struct nfsd4_blocked_lock *nbl = container_of(fl, 5421 struct nfsd4_blocked_lock, nbl_lock); 5422 bool queue = false; 5423 5424 /* An empty list means that something else is going to be using it */ 5425 spin_lock(&nn->blocked_locks_lock); 5426 if (!list_empty(&nbl->nbl_list)) { 5427 list_del_init(&nbl->nbl_list); 5428 list_del_init(&nbl->nbl_lru); 5429 queue = true; 5430 } 5431 spin_unlock(&nn->blocked_locks_lock); 5432 5433 if (queue) 5434 nfsd4_run_cb(&nbl->nbl_cb); 5435 } 5436 5437 static const struct lock_manager_operations nfsd_posix_mng_ops = { 5438 .lm_notify = nfsd4_lm_notify, 5439 .lm_get_owner = nfsd4_fl_get_owner, 5440 .lm_put_owner = nfsd4_fl_put_owner, 5441 }; 5442 5443 static inline void 5444 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 5445 { 5446 struct nfs4_lockowner *lo; 5447 5448 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 5449 lo = (struct nfs4_lockowner *) fl->fl_owner; 5450 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data, 5451 lo->lo_owner.so_owner.len, GFP_KERNEL); 5452 if (!deny->ld_owner.data) 5453 /* We just don't care that much */ 5454 goto nevermind; 5455 deny->ld_owner.len = lo->lo_owner.so_owner.len; 5456 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 5457 } else { 5458 nevermind: 5459 deny->ld_owner.len = 0; 5460 deny->ld_owner.data = NULL; 5461 
deny->ld_clientid.cl_boot = 0; 5462 deny->ld_clientid.cl_id = 0; 5463 } 5464 deny->ld_start = fl->fl_start; 5465 deny->ld_length = NFS4_MAX_UINT64; 5466 if (fl->fl_end != NFS4_MAX_UINT64) 5467 deny->ld_length = fl->fl_end - fl->fl_start + 1; 5468 deny->ld_type = NFS4_READ_LT; 5469 if (fl->fl_type != F_RDLCK) 5470 deny->ld_type = NFS4_WRITE_LT; 5471 } 5472 5473 static struct nfs4_lockowner * 5474 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) 5475 { 5476 unsigned int strhashval = ownerstr_hashval(owner); 5477 struct nfs4_stateowner *so; 5478 5479 lockdep_assert_held(&clp->cl_lock); 5480 5481 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 5482 so_strhash) { 5483 if (so->so_is_open_owner) 5484 continue; 5485 if (same_owner_str(so, owner)) 5486 return lockowner(nfs4_get_stateowner(so)); 5487 } 5488 return NULL; 5489 } 5490 5491 static struct nfs4_lockowner * 5492 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) 5493 { 5494 struct nfs4_lockowner *lo; 5495 5496 spin_lock(&clp->cl_lock); 5497 lo = find_lockowner_str_locked(clp, owner); 5498 spin_unlock(&clp->cl_lock); 5499 return lo; 5500 } 5501 5502 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 5503 { 5504 unhash_lockowner_locked(lockowner(sop)); 5505 } 5506 5507 static void nfs4_free_lockowner(struct nfs4_stateowner *sop) 5508 { 5509 struct nfs4_lockowner *lo = lockowner(sop); 5510 5511 kmem_cache_free(lockowner_slab, lo); 5512 } 5513 5514 static const struct nfs4_stateowner_operations lockowner_ops = { 5515 .so_unhash = nfs4_unhash_lockowner, 5516 .so_free = nfs4_free_lockowner, 5517 }; 5518 5519 /* 5520 * Alloc a lock owner structure. 5521 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 5522 * occurred. 
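 * If a concurrent thread creates the same lockowner first, the newly
 * allocated one is freed under cl_lock and the existing owner is
 * returned instead.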
5523 * 5524 * strhashval = ownerstr_hashval 5525 */ 5526 static struct nfs4_lockowner * 5527 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 5528 struct nfs4_ol_stateid *open_stp, 5529 struct nfsd4_lock *lock) 5530 { 5531 struct nfs4_lockowner *lo, *ret; 5532 5533 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 5534 if (!lo) 5535 return NULL; 5536 INIT_LIST_HEAD(&lo->lo_blocked); 5537 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 5538 lo->lo_owner.so_is_open_owner = 0; 5539 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 5540 lo->lo_owner.so_ops = &lockowner_ops; 5541 spin_lock(&clp->cl_lock); 5542 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); 5543 if (ret == NULL) { 5544 list_add(&lo->lo_owner.so_strhash, 5545 &clp->cl_ownerstr_hashtbl[strhashval]); 5546 ret = lo; 5547 } else 5548 nfs4_free_stateowner(&lo->lo_owner); 5549 5550 spin_unlock(&clp->cl_lock); 5551 return ret; 5552 } 5553 5554 static void 5555 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 5556 struct nfs4_file *fp, struct inode *inode, 5557 struct nfs4_ol_stateid *open_stp) 5558 { 5559 struct nfs4_client *clp = lo->lo_owner.so_client; 5560 5561 lockdep_assert_held(&clp->cl_lock); 5562 5563 atomic_inc(&stp->st_stid.sc_count); 5564 stp->st_stid.sc_type = NFS4_LOCK_STID; 5565 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 5566 get_nfs4_file(fp); 5567 stp->st_stid.sc_file = fp; 5568 stp->st_access_bmap = 0; 5569 stp->st_deny_bmap = open_stp->st_deny_bmap; 5570 stp->st_openstp = open_stp; 5571 mutex_init(&stp->st_mutex); 5572 list_add(&stp->st_locks, &open_stp->st_locks); 5573 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 5574 spin_lock(&fp->fi_lock); 5575 list_add(&stp->st_perfile, &fp->fi_stateids); 5576 spin_unlock(&fp->fi_lock); 5577 } 5578 5579 static struct nfs4_ol_stateid * 5580 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) 5581 { 5582 struct nfs4_ol_stateid *lst; 5583 struct nfs4_client *clp = lo->lo_owner.so_client; 5584 5585 lockdep_assert_held(&clp->cl_lock); 5586 5587 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { 5588 if (lst->st_stid.sc_file == fp) { 5589 atomic_inc(&lst->st_stid.sc_count); 5590 return lst; 5591 } 5592 } 5593 return NULL; 5594 } 5595 5596 static struct nfs4_ol_stateid * 5597 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, 5598 struct inode *inode, struct nfs4_ol_stateid *ost, 5599 bool *new) 5600 { 5601 struct nfs4_stid *ns = NULL; 5602 struct nfs4_ol_stateid *lst; 5603 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 5604 struct nfs4_client *clp = oo->oo_owner.so_client; 5605 5606 spin_lock(&clp->cl_lock); 5607 lst = find_lock_stateid(lo, fi); 5608 if (lst == NULL) { 5609 spin_unlock(&clp->cl_lock); 5610 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); 5611 if (ns == NULL) 5612 return NULL; 5613 5614 spin_lock(&clp->cl_lock); 5615 lst = find_lock_stateid(lo, fi); 5616 if (likely(!lst)) { 5617 lst = openlockstateid(ns); 5618 init_lock_stateid(lst, lo, fi, inode, ost); 5619 ns = NULL; 5620 *new = true; 5621 } 5622 } 5623 spin_unlock(&clp->cl_lock); 5624 if (ns) 5625 nfs4_put_stid(ns); 5626 return lst; 5627 } 5628 5629 static int 5630 check_lock_length(u64 offset, u64 length) 5631 { 5632 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 5633 (length > ~offset))); 5634 } 5635 5636 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 5637 { 5638 struct nfs4_file *fp = 
lock_stp->st_stid.sc_file; 5639 5640 lockdep_assert_held(&fp->fi_lock); 5641 5642 if (test_access(access, lock_stp)) 5643 return; 5644 __nfs4_file_get_access(fp, access); 5645 set_access(access, lock_stp); 5646 } 5647 5648 static __be32 5649 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 5650 struct nfs4_ol_stateid *ost, 5651 struct nfsd4_lock *lock, 5652 struct nfs4_ol_stateid **plst, bool *new) 5653 { 5654 __be32 status; 5655 struct nfs4_file *fi = ost->st_stid.sc_file; 5656 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 5657 struct nfs4_client *cl = oo->oo_owner.so_client; 5658 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 5659 struct nfs4_lockowner *lo; 5660 struct nfs4_ol_stateid *lst; 5661 unsigned int strhashval; 5662 bool hashed; 5663 5664 lo = find_lockowner_str(cl, &lock->lk_new_owner); 5665 if (!lo) { 5666 strhashval = ownerstr_hashval(&lock->lk_new_owner); 5667 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 5668 if (lo == NULL) 5669 return nfserr_jukebox; 5670 } else { 5671 /* with an existing lockowner, seqids must be the same */ 5672 status = nfserr_bad_seqid; 5673 if (!cstate->minorversion && 5674 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 5675 goto out; 5676 } 5677 5678 retry: 5679 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 5680 if (lst == NULL) { 5681 status = nfserr_jukebox; 5682 goto out; 5683 } 5684 5685 mutex_lock(&lst->st_mutex); 5686 5687 /* See if it's still hashed to avoid race with FREE_STATEID */ 5688 spin_lock(&cl->cl_lock); 5689 hashed = !list_empty(&lst->st_perfile); 5690 spin_unlock(&cl->cl_lock); 5691 5692 if (!hashed) { 5693 mutex_unlock(&lst->st_mutex); 5694 nfs4_put_stid(&lst->st_stid); 5695 goto retry; 5696 } 5697 status = nfs_ok; 5698 *plst = lst; 5699 out: 5700 nfs4_put_stateowner(&lo->lo_owner); 5701 return status; 5702 } 5703 5704 /* 5705 * LOCK operation 5706 */ 5707 __be32 5708 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5709 struct nfsd4_lock *lock) 5710 { 5711 struct nfs4_openowner *open_sop = NULL; 5712 struct nfs4_lockowner *lock_sop = NULL; 5713 struct nfs4_ol_stateid *lock_stp = NULL; 5714 struct nfs4_ol_stateid *open_stp = NULL; 5715 struct nfs4_file *fp; 5716 struct file *filp = NULL; 5717 struct nfsd4_blocked_lock *nbl = NULL; 5718 struct file_lock *file_lock = NULL; 5719 struct file_lock *conflock = NULL; 5720 __be32 status = 0; 5721 int lkflg; 5722 int err; 5723 bool new = false; 5724 unsigned char fl_type; 5725 unsigned int fl_flags = FL_POSIX; 5726 struct net *net = SVC_NET(rqstp); 5727 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5728 5729 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 5730 (long long) lock->lk_offset, 5731 (long long) lock->lk_length); 5732 5733 if (check_lock_length(lock->lk_offset, lock->lk_length)) 5734 return nfserr_inval; 5735 5736 if ((status = fh_verify(rqstp, &cstate->current_fh, 5737 S_IFREG, NFSD_MAY_LOCK))) { 5738 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 5739 return status; 5740 } 5741 5742 if (lock->lk_is_new) { 5743 if (nfsd4_has_session(cstate)) 5744 /* See rfc 5661 18.10.3: given clientid is ignored: */ 5745 memcpy(&lock->lk_new_clientid, 5746 &cstate->session->se_client->cl_clientid, 5747 sizeof(clientid_t)); 5748 5749 status = nfserr_stale_clientid; 5750 if (STALE_CLIENTID(&lock->lk_new_clientid, nn)) 5751 goto out; 5752 5753 /* validate and update open stateid and open seqid */ 5754 status = nfs4_preprocess_confirmed_seqid_op(cstate, 5755 lock->lk_new_open_seqid, 5756 
&lock->lk_new_open_stateid, 5757 &open_stp, nn); 5758 if (status) 5759 goto out; 5760 mutex_unlock(&open_stp->st_mutex); 5761 open_sop = openowner(open_stp->st_stateowner); 5762 status = nfserr_bad_stateid; 5763 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 5764 &lock->lk_new_clientid)) 5765 goto out; 5766 status = lookup_or_create_lock_state(cstate, open_stp, lock, 5767 &lock_stp, &new); 5768 } else { 5769 status = nfs4_preprocess_seqid_op(cstate, 5770 lock->lk_old_lock_seqid, 5771 &lock->lk_old_lock_stateid, 5772 NFS4_LOCK_STID, &lock_stp, nn); 5773 } 5774 if (status) 5775 goto out; 5776 lock_sop = lockowner(lock_stp->st_stateowner); 5777 5778 lkflg = setlkflg(lock->lk_type); 5779 status = nfs4_check_openmode(lock_stp, lkflg); 5780 if (status) 5781 goto out; 5782 5783 status = nfserr_grace; 5784 if (locks_in_grace(net) && !lock->lk_reclaim) 5785 goto out; 5786 status = nfserr_no_grace; 5787 if (!locks_in_grace(net) && lock->lk_reclaim) 5788 goto out; 5789 5790 fp = lock_stp->st_stid.sc_file; 5791 switch (lock->lk_type) { 5792 case NFS4_READW_LT: 5793 if (nfsd4_has_session(cstate)) 5794 fl_flags |= FL_SLEEP; 5795 /* Fallthrough */ 5796 case NFS4_READ_LT: 5797 spin_lock(&fp->fi_lock); 5798 filp = find_readable_file_locked(fp); 5799 if (filp) 5800 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 5801 spin_unlock(&fp->fi_lock); 5802 fl_type = F_RDLCK; 5803 break; 5804 case NFS4_WRITEW_LT: 5805 if (nfsd4_has_session(cstate)) 5806 fl_flags |= FL_SLEEP; 5807 /* Fallthrough */ 5808 case NFS4_WRITE_LT: 5809 spin_lock(&fp->fi_lock); 5810 filp = find_writeable_file_locked(fp); 5811 if (filp) 5812 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 5813 spin_unlock(&fp->fi_lock); 5814 fl_type = F_WRLCK; 5815 break; 5816 default: 5817 status = nfserr_inval; 5818 goto out; 5819 } 5820 5821 if (!filp) { 5822 status = nfserr_openmode; 5823 goto out; 5824 } 5825 5826 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); 5827 if (!nbl) { 5828 dprintk("NFSD: %s: unable to allocate block!\n", __func__); 5829 status = nfserr_jukebox; 5830 goto out; 5831 } 5832 5833 file_lock = &nbl->nbl_lock; 5834 file_lock->fl_type = fl_type; 5835 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 5836 file_lock->fl_pid = current->tgid; 5837 file_lock->fl_file = filp; 5838 file_lock->fl_flags = fl_flags; 5839 file_lock->fl_lmops = &nfsd_posix_mng_ops; 5840 file_lock->fl_start = lock->lk_offset; 5841 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 5842 nfs4_transform_lock_offset(file_lock); 5843 5844 conflock = locks_alloc_lock(); 5845 if (!conflock) { 5846 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5847 status = nfserr_jukebox; 5848 goto out; 5849 } 5850 5851 if (fl_flags & FL_SLEEP) { 5852 nbl->nbl_time = jiffies; 5853 spin_lock(&nn->blocked_locks_lock); 5854 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 5855 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 5856 spin_unlock(&nn->blocked_locks_lock); 5857 } 5858 5859 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); 5860 switch (err) { 5861 case 0: /* success! 
*/ 5862 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); 5863 status = 0; 5864 break; 5865 case FILE_LOCK_DEFERRED: 5866 nbl = NULL; 5867 /* Fallthrough */ 5868 case -EAGAIN: /* conflock holds conflicting lock */ 5869 status = nfserr_denied; 5870 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 5871 nfs4_set_lock_denied(conflock, &lock->lk_denied); 5872 break; 5873 case -EDEADLK: 5874 status = nfserr_deadlock; 5875 break; 5876 default: 5877 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 5878 status = nfserrno(err); 5879 break; 5880 } 5881 out: 5882 if (nbl) { 5883 /* dequeue it if we queued it before */ 5884 if (fl_flags & FL_SLEEP) { 5885 spin_lock(&nn->blocked_locks_lock); 5886 list_del_init(&nbl->nbl_list); 5887 list_del_init(&nbl->nbl_lru); 5888 spin_unlock(&nn->blocked_locks_lock); 5889 } 5890 free_blocked_lock(nbl); 5891 } 5892 if (filp) 5893 fput(filp); 5894 if (lock_stp) { 5895 /* Bump seqid manually if the 4.0 replay owner is openowner */ 5896 if (cstate->replay_owner && 5897 cstate->replay_owner != &lock_sop->lo_owner && 5898 seqid_mutating_err(ntohl(status))) 5899 lock_sop->lo_owner.so_seqid++; 5900 5901 mutex_unlock(&lock_stp->st_mutex); 5902 5903 /* 5904 * If this is a new, never-before-used stateid, and we are 5905 * returning an error, then just go ahead and release it. 5906 */ 5907 if (status && new) 5908 release_lock_stateid(lock_stp); 5909 5910 nfs4_put_stid(&lock_stp->st_stid); 5911 } 5912 if (open_stp) 5913 nfs4_put_stid(&open_stp->st_stid); 5914 nfsd4_bump_seqid(cstate, status); 5915 if (conflock) 5916 locks_free_lock(conflock); 5917 return status; 5918 } 5919 5920 /* 5921 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 5922 * so we do a temporary open here just to get an open file to pass to 5923 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 5924 * inode operation.) 
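 * nfsd_test_lock() below does just that: it opens the file for read,
 * hands the caller's file_lock to vfs_test_lock() (which writes any
 * conflicting lock back into that structure), and then drops the
 * temporary open again with fput().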
5925 */ 5926 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 5927 { 5928 struct file *file; 5929 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 5930 if (!err) { 5931 err = nfserrno(vfs_test_lock(file, lock)); 5932 fput(file); 5933 } 5934 return err; 5935 } 5936 5937 /* 5938 * LOCKT operation 5939 */ 5940 __be32 5941 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5942 struct nfsd4_lockt *lockt) 5943 { 5944 struct file_lock *file_lock = NULL; 5945 struct nfs4_lockowner *lo = NULL; 5946 __be32 status; 5947 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5948 5949 if (locks_in_grace(SVC_NET(rqstp))) 5950 return nfserr_grace; 5951 5952 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 5953 return nfserr_inval; 5954 5955 if (!nfsd4_has_session(cstate)) { 5956 status = lookup_clientid(&lockt->lt_clientid, cstate, nn); 5957 if (status) 5958 goto out; 5959 } 5960 5961 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 5962 goto out; 5963 5964 file_lock = locks_alloc_lock(); 5965 if (!file_lock) { 5966 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5967 status = nfserr_jukebox; 5968 goto out; 5969 } 5970 5971 switch (lockt->lt_type) { 5972 case NFS4_READ_LT: 5973 case NFS4_READW_LT: 5974 file_lock->fl_type = F_RDLCK; 5975 break; 5976 case NFS4_WRITE_LT: 5977 case NFS4_WRITEW_LT: 5978 file_lock->fl_type = F_WRLCK; 5979 break; 5980 default: 5981 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 5982 status = nfserr_inval; 5983 goto out; 5984 } 5985 5986 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); 5987 if (lo) 5988 file_lock->fl_owner = (fl_owner_t)lo; 5989 file_lock->fl_pid = current->tgid; 5990 file_lock->fl_flags = FL_POSIX; 5991 5992 file_lock->fl_start = lockt->lt_offset; 5993 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 5994 5995 nfs4_transform_lock_offset(file_lock); 5996 5997 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 5998 if (status) 5999 goto out; 6000 6001 if (file_lock->fl_type != F_UNLCK) { 6002 status = nfserr_denied; 6003 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 6004 } 6005 out: 6006 if (lo) 6007 nfs4_put_stateowner(&lo->lo_owner); 6008 if (file_lock) 6009 locks_free_lock(file_lock); 6010 return status; 6011 } 6012 6013 __be32 6014 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6015 struct nfsd4_locku *locku) 6016 { 6017 struct nfs4_ol_stateid *stp; 6018 struct file *filp = NULL; 6019 struct file_lock *file_lock = NULL; 6020 __be32 status; 6021 int err; 6022 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6023 6024 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 6025 (long long) locku->lu_offset, 6026 (long long) locku->lu_length); 6027 6028 if (check_lock_length(locku->lu_offset, locku->lu_length)) 6029 return nfserr_inval; 6030 6031 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 6032 &locku->lu_stateid, NFS4_LOCK_STID, 6033 &stp, nn); 6034 if (status) 6035 goto out; 6036 filp = find_any_file(stp->st_stid.sc_file); 6037 if (!filp) { 6038 status = nfserr_lock_range; 6039 goto put_stateid; 6040 } 6041 file_lock = locks_alloc_lock(); 6042 if (!file_lock) { 6043 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 6044 status = nfserr_jukebox; 6045 goto fput; 6046 } 6047 6048 file_lock->fl_type = F_UNLCK; 6049 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); 6050 
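	/*
	 * The rest of the F_UNLCK request: the byte range comes straight from
	 * the LOCKU arguments and is normalized by nfs4_transform_lock_offset()
	 * below; the stateowner reference taken just above is expected to be
	 * dropped again when the request is released, via the fl_lmops set below.
	 */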
file_lock->fl_pid = current->tgid; 6051 file_lock->fl_file = filp; 6052 file_lock->fl_flags = FL_POSIX; 6053 file_lock->fl_lmops = &nfsd_posix_mng_ops; 6054 file_lock->fl_start = locku->lu_offset; 6055 6056 file_lock->fl_end = last_byte_offset(locku->lu_offset, 6057 locku->lu_length); 6058 nfs4_transform_lock_offset(file_lock); 6059 6060 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); 6061 if (err) { 6062 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 6063 goto out_nfserr; 6064 } 6065 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); 6066 fput: 6067 fput(filp); 6068 put_stateid: 6069 mutex_unlock(&stp->st_mutex); 6070 nfs4_put_stid(&stp->st_stid); 6071 out: 6072 nfsd4_bump_seqid(cstate, status); 6073 if (file_lock) 6074 locks_free_lock(file_lock); 6075 return status; 6076 6077 out_nfserr: 6078 status = nfserrno(err); 6079 goto fput; 6080 } 6081 6082 /* 6083 * returns 6084 * true: locks held by lockowner 6085 * false: no locks held by lockowner 6086 */ 6087 static bool 6088 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 6089 { 6090 struct file_lock *fl; 6091 int status = false; 6092 struct file *filp = find_any_file(fp); 6093 struct inode *inode; 6094 struct file_lock_context *flctx; 6095 6096 if (!filp) { 6097 /* Any valid lock stateid should have some sort of access */ 6098 WARN_ON_ONCE(1); 6099 return status; 6100 } 6101 6102 inode = file_inode(filp); 6103 flctx = inode->i_flctx; 6104 6105 if (flctx && !list_empty_careful(&flctx->flc_posix)) { 6106 spin_lock(&flctx->flc_lock); 6107 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 6108 if (fl->fl_owner == (fl_owner_t)lowner) { 6109 status = true; 6110 break; 6111 } 6112 } 6113 spin_unlock(&flctx->flc_lock); 6114 } 6115 fput(filp); 6116 return status; 6117 } 6118 6119 __be32 6120 nfsd4_release_lockowner(struct svc_rqst *rqstp, 6121 struct nfsd4_compound_state *cstate, 6122 struct nfsd4_release_lockowner *rlockowner) 6123 { 6124 clientid_t *clid = &rlockowner->rl_clientid; 6125 struct nfs4_stateowner *sop; 6126 struct nfs4_lockowner *lo = NULL; 6127 struct nfs4_ol_stateid *stp; 6128 struct xdr_netobj *owner = &rlockowner->rl_owner; 6129 unsigned int hashval = ownerstr_hashval(owner); 6130 __be32 status; 6131 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6132 struct nfs4_client *clp; 6133 LIST_HEAD (reaplist); 6134 6135 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 6136 clid->cl_boot, clid->cl_id); 6137 6138 status = lookup_clientid(clid, cstate, nn); 6139 if (status) 6140 return status; 6141 6142 clp = cstate->clp; 6143 /* Find the matching lock stateowner */ 6144 spin_lock(&clp->cl_lock); 6145 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval], 6146 so_strhash) { 6147 6148 if (sop->so_is_open_owner || !same_owner_str(sop, owner)) 6149 continue; 6150 6151 /* see if there are still any locks associated with it */ 6152 lo = lockowner(sop); 6153 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { 6154 if (check_for_locks(stp->st_stid.sc_file, lo)) { 6155 status = nfserr_locks_held; 6156 spin_unlock(&clp->cl_lock); 6157 return status; 6158 } 6159 } 6160 6161 nfs4_get_stateowner(sop); 6162 break; 6163 } 6164 if (!lo) { 6165 spin_unlock(&clp->cl_lock); 6166 return status; 6167 } 6168 6169 unhash_lockowner_locked(lo); 6170 while (!list_empty(&lo->lo_owner.so_stateids)) { 6171 stp = list_first_entry(&lo->lo_owner.so_stateids, 6172 struct nfs4_ol_stateid, 6173 st_perstateowner); 6174 WARN_ON(!unhash_lock_stateid(stp)); 6175 
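		/*
		 * Collect the stateid on the local reaplist; it is actually
		 * freed by free_ol_stateid_reaplist() once cl_lock is dropped.
		 */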
put_ol_stateid_locked(stp, &reaplist); 6176 } 6177 spin_unlock(&clp->cl_lock); 6178 free_ol_stateid_reaplist(&reaplist); 6179 nfs4_put_stateowner(&lo->lo_owner); 6180 6181 return status; 6182 } 6183 6184 static inline struct nfs4_client_reclaim * 6185 alloc_reclaim(void) 6186 { 6187 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 6188 } 6189 6190 bool 6191 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn) 6192 { 6193 struct nfs4_client_reclaim *crp; 6194 6195 crp = nfsd4_find_reclaim_client(name, nn); 6196 return (crp && crp->cr_clp); 6197 } 6198 6199 /* 6200 * failure => all reset bets are off, nfserr_no_grace... 6201 */ 6202 struct nfs4_client_reclaim * 6203 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn) 6204 { 6205 unsigned int strhashval; 6206 struct nfs4_client_reclaim *crp; 6207 6208 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); 6209 crp = alloc_reclaim(); 6210 if (crp) { 6211 strhashval = clientstr_hashval(name); 6212 INIT_LIST_HEAD(&crp->cr_strhash); 6213 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 6214 memcpy(crp->cr_recdir, name, HEXDIR_LEN); 6215 crp->cr_clp = NULL; 6216 nn->reclaim_str_hashtbl_size++; 6217 } 6218 return crp; 6219 } 6220 6221 void 6222 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 6223 { 6224 list_del(&crp->cr_strhash); 6225 kfree(crp); 6226 nn->reclaim_str_hashtbl_size--; 6227 } 6228 6229 void 6230 nfs4_release_reclaim(struct nfsd_net *nn) 6231 { 6232 struct nfs4_client_reclaim *crp = NULL; 6233 int i; 6234 6235 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6236 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 6237 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 6238 struct nfs4_client_reclaim, cr_strhash); 6239 nfs4_remove_reclaim_record(crp, nn); 6240 } 6241 } 6242 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 6243 } 6244 6245 /* 6246 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 6247 struct nfs4_client_reclaim * 6248 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn) 6249 { 6250 unsigned int strhashval; 6251 struct nfs4_client_reclaim *crp = NULL; 6252 6253 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir); 6254 6255 strhashval = clientstr_hashval(recdir); 6256 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 6257 if (same_name(crp->cr_recdir, recdir)) { 6258 return crp; 6259 } 6260 } 6261 return NULL; 6262 } 6263 6264 /* 6265 * Called from OPEN. Look for clientid in reclaim list. 
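 * Returns nfs_ok if the client is allowed to reclaim, nfserr_no_grace if it
 * has already sent RECLAIM_COMPLETE, and nfserr_reclaim_bad if the clientid
 * is unknown or has no recovery record.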
6266 */ 6267 __be32 6268 nfs4_check_open_reclaim(clientid_t *clid, 6269 struct nfsd4_compound_state *cstate, 6270 struct nfsd_net *nn) 6271 { 6272 __be32 status; 6273 6274 /* find clientid in conf_id_hashtbl */ 6275 status = lookup_clientid(clid, cstate, nn); 6276 if (status) 6277 return nfserr_reclaim_bad; 6278 6279 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags)) 6280 return nfserr_no_grace; 6281 6282 if (nfsd4_client_record_check(cstate->clp)) 6283 return nfserr_reclaim_bad; 6284 6285 return nfs_ok; 6286 } 6287 6288 #ifdef CONFIG_NFSD_FAULT_INJECTION 6289 static inline void 6290 put_client(struct nfs4_client *clp) 6291 { 6292 atomic_dec(&clp->cl_refcount); 6293 } 6294 6295 static struct nfs4_client * 6296 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) 6297 { 6298 struct nfs4_client *clp; 6299 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6300 nfsd_net_id); 6301 6302 if (!nfsd_netns_ready(nn)) 6303 return NULL; 6304 6305 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6306 if (memcmp(&clp->cl_addr, addr, addr_size) == 0) 6307 return clp; 6308 } 6309 return NULL; 6310 } 6311 6312 u64 6313 nfsd_inject_print_clients(void) 6314 { 6315 struct nfs4_client *clp; 6316 u64 count = 0; 6317 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6318 nfsd_net_id); 6319 char buf[INET6_ADDRSTRLEN]; 6320 6321 if (!nfsd_netns_ready(nn)) 6322 return 0; 6323 6324 spin_lock(&nn->client_lock); 6325 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6326 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 6327 pr_info("NFS Client: %s\n", buf); 6328 ++count; 6329 } 6330 spin_unlock(&nn->client_lock); 6331 6332 return count; 6333 } 6334 6335 u64 6336 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size) 6337 { 6338 u64 count = 0; 6339 struct nfs4_client *clp; 6340 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6341 nfsd_net_id); 6342 6343 if (!nfsd_netns_ready(nn)) 6344 return count; 6345 6346 spin_lock(&nn->client_lock); 6347 clp = nfsd_find_client(addr, addr_size); 6348 if (clp) { 6349 if (mark_client_expired_locked(clp) == nfs_ok) 6350 ++count; 6351 else 6352 clp = NULL; 6353 } 6354 spin_unlock(&nn->client_lock); 6355 6356 if (clp) 6357 expire_client(clp); 6358 6359 return count; 6360 } 6361 6362 u64 6363 nfsd_inject_forget_clients(u64 max) 6364 { 6365 u64 count = 0; 6366 struct nfs4_client *clp, *next; 6367 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6368 nfsd_net_id); 6369 LIST_HEAD(reaplist); 6370 6371 if (!nfsd_netns_ready(nn)) 6372 return count; 6373 6374 spin_lock(&nn->client_lock); 6375 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { 6376 if (mark_client_expired_locked(clp) == nfs_ok) { 6377 list_add(&clp->cl_lru, &reaplist); 6378 if (max != 0 && ++count >= max) 6379 break; 6380 } 6381 } 6382 spin_unlock(&nn->client_lock); 6383 6384 list_for_each_entry_safe(clp, next, &reaplist, cl_lru) 6385 expire_client(clp); 6386 6387 return count; 6388 } 6389 6390 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, 6391 const char *type) 6392 { 6393 char buf[INET6_ADDRSTRLEN]; 6394 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 6395 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); 6396 } 6397 6398 static void 6399 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, 6400 struct list_head *collect) 6401 { 6402 struct nfs4_client *clp = lst->st_stid.sc_client; 6403 struct nfsd_net *nn = 
net_generic(current->nsproxy->net_ns, 6404 nfsd_net_id); 6405 6406 if (!collect) 6407 return; 6408 6409 lockdep_assert_held(&nn->client_lock); 6410 atomic_inc(&clp->cl_refcount); 6411 list_add(&lst->st_locks, collect); 6412 } 6413 6414 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, 6415 struct list_head *collect, 6416 bool (*func)(struct nfs4_ol_stateid *)) 6417 { 6418 struct nfs4_openowner *oop; 6419 struct nfs4_ol_stateid *stp, *st_next; 6420 struct nfs4_ol_stateid *lst, *lst_next; 6421 u64 count = 0; 6422 6423 spin_lock(&clp->cl_lock); 6424 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { 6425 list_for_each_entry_safe(stp, st_next, 6426 &oop->oo_owner.so_stateids, st_perstateowner) { 6427 list_for_each_entry_safe(lst, lst_next, 6428 &stp->st_locks, st_locks) { 6429 if (func) { 6430 if (func(lst)) 6431 nfsd_inject_add_lock_to_list(lst, 6432 collect); 6433 } 6434 ++count; 6435 /* 6436 * Despite the fact that these functions deal 6437 * with 64-bit integers for "count", we must 6438 * ensure that it doesn't blow up the 6439 * clp->cl_refcount. Throw a warning if we 6440 * start to approach INT_MAX here. 6441 */ 6442 WARN_ON_ONCE(count == (INT_MAX / 2)); 6443 if (count == max) 6444 goto out; 6445 } 6446 } 6447 } 6448 out: 6449 spin_unlock(&clp->cl_lock); 6450 6451 return count; 6452 } 6453 6454 static u64 6455 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect, 6456 u64 max) 6457 { 6458 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid); 6459 } 6460 6461 static u64 6462 nfsd_print_client_locks(struct nfs4_client *clp) 6463 { 6464 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL); 6465 nfsd_print_count(clp, count, "locked files"); 6466 return count; 6467 } 6468 6469 u64 6470 nfsd_inject_print_locks(void) 6471 { 6472 struct nfs4_client *clp; 6473 u64 count = 0; 6474 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6475 nfsd_net_id); 6476 6477 if (!nfsd_netns_ready(nn)) 6478 return 0; 6479 6480 spin_lock(&nn->client_lock); 6481 list_for_each_entry(clp, &nn->client_lru, cl_lru) 6482 count += nfsd_print_client_locks(clp); 6483 spin_unlock(&nn->client_lock); 6484 6485 return count; 6486 } 6487 6488 static void 6489 nfsd_reap_locks(struct list_head *reaplist) 6490 { 6491 struct nfs4_client *clp; 6492 struct nfs4_ol_stateid *stp, *next; 6493 6494 list_for_each_entry_safe(stp, next, reaplist, st_locks) { 6495 list_del_init(&stp->st_locks); 6496 clp = stp->st_stid.sc_client; 6497 nfs4_put_stid(&stp->st_stid); 6498 put_client(clp); 6499 } 6500 } 6501 6502 u64 6503 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size) 6504 { 6505 unsigned int count = 0; 6506 struct nfs4_client *clp; 6507 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6508 nfsd_net_id); 6509 LIST_HEAD(reaplist); 6510 6511 if (!nfsd_netns_ready(nn)) 6512 return count; 6513 6514 spin_lock(&nn->client_lock); 6515 clp = nfsd_find_client(addr, addr_size); 6516 if (clp) 6517 count = nfsd_collect_client_locks(clp, &reaplist, 0); 6518 spin_unlock(&nn->client_lock); 6519 nfsd_reap_locks(&reaplist); 6520 return count; 6521 } 6522 6523 u64 6524 nfsd_inject_forget_locks(u64 max) 6525 { 6526 u64 count = 0; 6527 struct nfs4_client *clp; 6528 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6529 nfsd_net_id); 6530 LIST_HEAD(reaplist); 6531 6532 if (!nfsd_netns_ready(nn)) 6533 return count; 6534 6535 spin_lock(&nn->client_lock); 6536 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6537 count += 
nfsd_collect_client_locks(clp, &reaplist, max - count); 6538 if (max != 0 && count >= max) 6539 break; 6540 } 6541 spin_unlock(&nn->client_lock); 6542 nfsd_reap_locks(&reaplist); 6543 return count; 6544 } 6545 6546 static u64 6547 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max, 6548 struct list_head *collect, 6549 void (*func)(struct nfs4_openowner *)) 6550 { 6551 struct nfs4_openowner *oop, *next; 6552 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6553 nfsd_net_id); 6554 u64 count = 0; 6555 6556 lockdep_assert_held(&nn->client_lock); 6557 6558 spin_lock(&clp->cl_lock); 6559 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { 6560 if (func) { 6561 func(oop); 6562 if (collect) { 6563 atomic_inc(&clp->cl_refcount); 6564 list_add(&oop->oo_perclient, collect); 6565 } 6566 } 6567 ++count; 6568 /* 6569 * Despite the fact that these functions deal with 6570 * 64-bit integers for "count", we must ensure that 6571 * it doesn't blow up the clp->cl_refcount. Throw a 6572 * warning if we start to approach INT_MAX here. 6573 */ 6574 WARN_ON_ONCE(count == (INT_MAX / 2)); 6575 if (count == max) 6576 break; 6577 } 6578 spin_unlock(&clp->cl_lock); 6579 6580 return count; 6581 } 6582 6583 static u64 6584 nfsd_print_client_openowners(struct nfs4_client *clp) 6585 { 6586 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL); 6587 6588 nfsd_print_count(clp, count, "openowners"); 6589 return count; 6590 } 6591 6592 static u64 6593 nfsd_collect_client_openowners(struct nfs4_client *clp, 6594 struct list_head *collect, u64 max) 6595 { 6596 return nfsd_foreach_client_openowner(clp, max, collect, 6597 unhash_openowner_locked); 6598 } 6599 6600 u64 6601 nfsd_inject_print_openowners(void) 6602 { 6603 struct nfs4_client *clp; 6604 u64 count = 0; 6605 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6606 nfsd_net_id); 6607 6608 if (!nfsd_netns_ready(nn)) 6609 return 0; 6610 6611 spin_lock(&nn->client_lock); 6612 list_for_each_entry(clp, &nn->client_lru, cl_lru) 6613 count += nfsd_print_client_openowners(clp); 6614 spin_unlock(&nn->client_lock); 6615 6616 return count; 6617 } 6618 6619 static void 6620 nfsd_reap_openowners(struct list_head *reaplist) 6621 { 6622 struct nfs4_client *clp; 6623 struct nfs4_openowner *oop, *next; 6624 6625 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) { 6626 list_del_init(&oop->oo_perclient); 6627 clp = oop->oo_owner.so_client; 6628 release_openowner(oop); 6629 put_client(clp); 6630 } 6631 } 6632 6633 u64 6634 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr, 6635 size_t addr_size) 6636 { 6637 unsigned int count = 0; 6638 struct nfs4_client *clp; 6639 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6640 nfsd_net_id); 6641 LIST_HEAD(reaplist); 6642 6643 if (!nfsd_netns_ready(nn)) 6644 return count; 6645 6646 spin_lock(&nn->client_lock); 6647 clp = nfsd_find_client(addr, addr_size); 6648 if (clp) 6649 count = nfsd_collect_client_openowners(clp, &reaplist, 0); 6650 spin_unlock(&nn->client_lock); 6651 nfsd_reap_openowners(&reaplist); 6652 return count; 6653 } 6654 6655 u64 6656 nfsd_inject_forget_openowners(u64 max) 6657 { 6658 u64 count = 0; 6659 struct nfs4_client *clp; 6660 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6661 nfsd_net_id); 6662 LIST_HEAD(reaplist); 6663 6664 if (!nfsd_netns_ready(nn)) 6665 return count; 6666 6667 spin_lock(&nn->client_lock); 6668 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6669 count += nfsd_collect_client_openowners(clp, 
&reaplist, 6670 max - count); 6671 if (max != 0 && count >= max) 6672 break; 6673 } 6674 spin_unlock(&nn->client_lock); 6675 nfsd_reap_openowners(&reaplist); 6676 return count; 6677 } 6678 6679 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, 6680 struct list_head *victims) 6681 { 6682 struct nfs4_delegation *dp, *next; 6683 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6684 nfsd_net_id); 6685 u64 count = 0; 6686 6687 lockdep_assert_held(&nn->client_lock); 6688 6689 spin_lock(&state_lock); 6690 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { 6691 if (victims) { 6692 /* 6693 * It's not safe to mess with delegations that have a 6694 * non-zero dl_time. They might have already been broken 6695 * and could be processed by the laundromat outside of 6696 * the state_lock. Just leave them be. 6697 */ 6698 if (dp->dl_time != 0) 6699 continue; 6700 6701 atomic_inc(&clp->cl_refcount); 6702 WARN_ON(!unhash_delegation_locked(dp)); 6703 list_add(&dp->dl_recall_lru, victims); 6704 } 6705 ++count; 6706 /* 6707 * Despite the fact that these functions deal with 6708 * 64-bit integers for "count", we must ensure that 6709 * it doesn't blow up the clp->cl_refcount. Throw a 6710 * warning if we start to approach INT_MAX here. 6711 */ 6712 WARN_ON_ONCE(count == (INT_MAX / 2)); 6713 if (count == max) 6714 break; 6715 } 6716 spin_unlock(&state_lock); 6717 return count; 6718 } 6719 6720 static u64 6721 nfsd_print_client_delegations(struct nfs4_client *clp) 6722 { 6723 u64 count = nfsd_find_all_delegations(clp, 0, NULL); 6724 6725 nfsd_print_count(clp, count, "delegations"); 6726 return count; 6727 } 6728 6729 u64 6730 nfsd_inject_print_delegations(void) 6731 { 6732 struct nfs4_client *clp; 6733 u64 count = 0; 6734 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6735 nfsd_net_id); 6736 6737 if (!nfsd_netns_ready(nn)) 6738 return 0; 6739 6740 spin_lock(&nn->client_lock); 6741 list_for_each_entry(clp, &nn->client_lru, cl_lru) 6742 count += nfsd_print_client_delegations(clp); 6743 spin_unlock(&nn->client_lock); 6744 6745 return count; 6746 } 6747 6748 static void 6749 nfsd_forget_delegations(struct list_head *reaplist) 6750 { 6751 struct nfs4_client *clp; 6752 struct nfs4_delegation *dp, *next; 6753 6754 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { 6755 list_del_init(&dp->dl_recall_lru); 6756 clp = dp->dl_stid.sc_client; 6757 revoke_delegation(dp); 6758 put_client(clp); 6759 } 6760 } 6761 6762 u64 6763 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr, 6764 size_t addr_size) 6765 { 6766 u64 count = 0; 6767 struct nfs4_client *clp; 6768 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6769 nfsd_net_id); 6770 LIST_HEAD(reaplist); 6771 6772 if (!nfsd_netns_ready(nn)) 6773 return count; 6774 6775 spin_lock(&nn->client_lock); 6776 clp = nfsd_find_client(addr, addr_size); 6777 if (clp) 6778 count = nfsd_find_all_delegations(clp, 0, &reaplist); 6779 spin_unlock(&nn->client_lock); 6780 6781 nfsd_forget_delegations(&reaplist); 6782 return count; 6783 } 6784 6785 u64 6786 nfsd_inject_forget_delegations(u64 max) 6787 { 6788 u64 count = 0; 6789 struct nfs4_client *clp; 6790 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6791 nfsd_net_id); 6792 LIST_HEAD(reaplist); 6793 6794 if (!nfsd_netns_ready(nn)) 6795 return count; 6796 6797 spin_lock(&nn->client_lock); 6798 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6799 count += nfsd_find_all_delegations(clp, max - count, &reaplist); 6800 if (max != 0 
&& count >= max) 6801 break; 6802 } 6803 spin_unlock(&nn->client_lock); 6804 nfsd_forget_delegations(&reaplist); 6805 return count; 6806 } 6807 6808 static void 6809 nfsd_recall_delegations(struct list_head *reaplist) 6810 { 6811 struct nfs4_client *clp; 6812 struct nfs4_delegation *dp, *next; 6813 6814 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { 6815 list_del_init(&dp->dl_recall_lru); 6816 clp = dp->dl_stid.sc_client; 6817 /* 6818 * We skipped all entries that had a zero dl_time before, 6819 * so we can now reset the dl_time back to 0. If a delegation 6820 * break comes in now, then it won't make any difference since 6821 * we're recalling it either way. 6822 */ 6823 spin_lock(&state_lock); 6824 dp->dl_time = 0; 6825 spin_unlock(&state_lock); 6826 nfsd_break_one_deleg(dp); 6827 put_client(clp); 6828 } 6829 } 6830 6831 u64 6832 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr, 6833 size_t addr_size) 6834 { 6835 u64 count = 0; 6836 struct nfs4_client *clp; 6837 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6838 nfsd_net_id); 6839 LIST_HEAD(reaplist); 6840 6841 if (!nfsd_netns_ready(nn)) 6842 return count; 6843 6844 spin_lock(&nn->client_lock); 6845 clp = nfsd_find_client(addr, addr_size); 6846 if (clp) 6847 count = nfsd_find_all_delegations(clp, 0, &reaplist); 6848 spin_unlock(&nn->client_lock); 6849 6850 nfsd_recall_delegations(&reaplist); 6851 return count; 6852 } 6853 6854 u64 6855 nfsd_inject_recall_delegations(u64 max) 6856 { 6857 u64 count = 0; 6858 struct nfs4_client *clp, *next; 6859 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6860 nfsd_net_id); 6861 LIST_HEAD(reaplist); 6862 6863 if (!nfsd_netns_ready(nn)) 6864 return count; 6865 6866 spin_lock(&nn->client_lock); 6867 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { 6868 count += nfsd_find_all_delegations(clp, max - count, &reaplist); 6869 if (max != 0 && ++count >= max) 6870 break; 6871 } 6872 spin_unlock(&nn->client_lock); 6873 nfsd_recall_delegations(&reaplist); 6874 return count; 6875 } 6876 #endif /* CONFIG_NFSD_FAULT_INJECTION */ 6877 6878 /* 6879 * Since the lifetime of a delegation isn't limited to that of an open, a 6880 * client may quite reasonably hang on to a delegation as long as it has 6881 * the inode cached. This becomes an obvious problem the first time a 6882 * client's inode cache approaches the size of the server's total memory. 6883 * 6884 * For now we avoid this problem by imposing a hard limit on the number 6885 * of delegations, which varies according to the server's memory size. 6886 */ 6887 static void 6888 set_max_delegations(void) 6889 { 6890 /* 6891 * Allow at most 4 delegations per megabyte of RAM. Quick 6892 * estimates suggest that in the worst case (where every delegation 6893 * is for a different inode), a delegation could take about 1.5K, 6894 * giving a worst case usage of about 6% of memory. 
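 *
 * With 4k pages the shift below works out to nr_free_buffer_pages() / 64,
 * i.e. one delegation per 64 pages (256k) of the memory counted by
 * nr_free_buffer_pages(), or roughly 4096 delegations per gigabyte of it.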
6895 */ 6896 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 6897 } 6898 6899 static int nfs4_state_create_net(struct net *net) 6900 { 6901 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6902 int i; 6903 6904 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) * 6905 CLIENT_HASH_SIZE, GFP_KERNEL); 6906 if (!nn->conf_id_hashtbl) 6907 goto err; 6908 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) * 6909 CLIENT_HASH_SIZE, GFP_KERNEL); 6910 if (!nn->unconf_id_hashtbl) 6911 goto err_unconf_id; 6912 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * 6913 SESSION_HASH_SIZE, GFP_KERNEL); 6914 if (!nn->sessionid_hashtbl) 6915 goto err_sessionid; 6916 6917 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6918 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 6919 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 6920 } 6921 for (i = 0; i < SESSION_HASH_SIZE; i++) 6922 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 6923 nn->conf_name_tree = RB_ROOT; 6924 nn->unconf_name_tree = RB_ROOT; 6925 INIT_LIST_HEAD(&nn->client_lru); 6926 INIT_LIST_HEAD(&nn->close_lru); 6927 INIT_LIST_HEAD(&nn->del_recall_lru); 6928 spin_lock_init(&nn->client_lock); 6929 6930 spin_lock_init(&nn->blocked_locks_lock); 6931 INIT_LIST_HEAD(&nn->blocked_locks_lru); 6932 6933 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 6934 get_net(net); 6935 6936 return 0; 6937 6938 err_sessionid: 6939 kfree(nn->unconf_id_hashtbl); 6940 err_unconf_id: 6941 kfree(nn->conf_id_hashtbl); 6942 err: 6943 return -ENOMEM; 6944 } 6945 6946 static void 6947 nfs4_state_destroy_net(struct net *net) 6948 { 6949 int i; 6950 struct nfs4_client *clp = NULL; 6951 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6952 6953 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6954 while (!list_empty(&nn->conf_id_hashtbl[i])) { 6955 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 6956 destroy_client(clp); 6957 } 6958 } 6959 6960 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6961 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 6962 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 6963 destroy_client(clp); 6964 } 6965 } 6966 6967 kfree(nn->sessionid_hashtbl); 6968 kfree(nn->unconf_id_hashtbl); 6969 kfree(nn->conf_id_hashtbl); 6970 put_net(net); 6971 } 6972 6973 int 6974 nfs4_state_start_net(struct net *net) 6975 { 6976 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6977 int ret; 6978 6979 ret = nfs4_state_create_net(net); 6980 if (ret) 6981 return ret; 6982 nn->boot_time = get_seconds(); 6983 nn->grace_ended = false; 6984 nn->nfsd4_manager.block_opens = true; 6985 locks_start_grace(net, &nn->nfsd4_manager); 6986 nfsd4_client_tracking_init(net); 6987 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", 6988 nn->nfsd4_grace, net); 6989 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 6990 return 0; 6991 } 6992 6993 /* initialization to perform when the nfsd service is started: */ 6994 6995 int 6996 nfs4_state_start(void) 6997 { 6998 int ret; 6999 7000 ret = set_callback_cred(); 7001 if (ret) 7002 return ret; 7003 7004 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); 7005 if (laundry_wq == NULL) { 7006 ret = -ENOMEM; 7007 goto out_cleanup_cred; 7008 } 7009 ret = nfsd4_create_callback_queue(); 7010 if (ret) 7011 goto out_free_laundry; 7012 7013 set_max_delegations(); 7014 return 0; 7015 7016 out_free_laundry: 7017 destroy_workqueue(laundry_wq); 7018 out_cleanup_cred: 7019 cleanup_callback_cred(); 7020 return ret; 7021 } 7022 7023 void 
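/*
 * Tear down all NFSv4 state belonging to this network namespace: stop the
 * laundromat, end any grace period, unhash and release the remaining
 * delegations, free queued blocked locks, and destroy the namespace's clients.
 */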
7024 nfs4_state_shutdown_net(struct net *net) 7025 { 7026 struct nfs4_delegation *dp = NULL; 7027 struct list_head *pos, *next, reaplist; 7028 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7029 struct nfsd4_blocked_lock *nbl; 7030 7031 cancel_delayed_work_sync(&nn->laundromat_work); 7032 locks_end_grace(&nn->nfsd4_manager); 7033 7034 INIT_LIST_HEAD(&reaplist); 7035 spin_lock(&state_lock); 7036 list_for_each_safe(pos, next, &nn->del_recall_lru) { 7037 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 7038 WARN_ON(!unhash_delegation_locked(dp)); 7039 list_add(&dp->dl_recall_lru, &reaplist); 7040 } 7041 spin_unlock(&state_lock); 7042 list_for_each_safe(pos, next, &reaplist) { 7043 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 7044 list_del_init(&dp->dl_recall_lru); 7045 put_clnt_odstate(dp->dl_clnt_odstate); 7046 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 7047 nfs4_put_stid(&dp->dl_stid); 7048 } 7049 7050 BUG_ON(!list_empty(&reaplist)); 7051 spin_lock(&nn->blocked_locks_lock); 7052 while (!list_empty(&nn->blocked_locks_lru)) { 7053 nbl = list_first_entry(&nn->blocked_locks_lru, 7054 struct nfsd4_blocked_lock, nbl_lru); 7055 list_move(&nbl->nbl_lru, &reaplist); 7056 list_del_init(&nbl->nbl_list); 7057 } 7058 spin_unlock(&nn->blocked_locks_lock); 7059 7060 while (!list_empty(&reaplist)) { 7061 nbl = list_first_entry(&reaplist, 7062 struct nfsd4_blocked_lock, nbl_lru); 7063 list_del_init(&nbl->nbl_lru); 7064 posix_unblock_lock(&nbl->nbl_lock); 7065 free_blocked_lock(nbl); 7066 } 7067 7068 nfsd4_client_tracking_exit(net); 7069 nfs4_state_destroy_net(net); 7070 } 7071 7072 void 7073 nfs4_state_shutdown(void) 7074 { 7075 destroy_workqueue(laundry_wq); 7076 nfsd4_destroy_callback_queue(); 7077 cleanup_callback_cred(); 7078 } 7079 7080 static void 7081 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7082 { 7083 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) 7084 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 7085 } 7086 7087 static void 7088 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7089 { 7090 if (cstate->minorversion) { 7091 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 7092 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 7093 } 7094 } 7095 7096 void 7097 clear_current_stateid(struct nfsd4_compound_state *cstate) 7098 { 7099 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 7100 } 7101 7102 /* 7103 * functions to set current state id 7104 */ 7105 void 7106 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 7107 { 7108 put_stateid(cstate, &odp->od_stateid); 7109 } 7110 7111 void 7112 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) 7113 { 7114 put_stateid(cstate, &open->op_stateid); 7115 } 7116 7117 void 7118 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 7119 { 7120 put_stateid(cstate, &close->cl_stateid); 7121 } 7122 7123 void 7124 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) 7125 { 7126 put_stateid(cstate, &lock->lk_resp_stateid); 7127 } 7128 7129 /* 7130 * functions to consume current state id 7131 */ 7132 7133 void 7134 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 7135 { 7136 get_stateid(cstate, &odp->od_stateid); 7137 } 7138 7139 void 7140 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct
nfsd4_delegreturn *drp) 7141 { 7142 get_stateid(cstate, &drp->dr_stateid); 7143 } 7144 7145 void 7146 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp) 7147 { 7148 get_stateid(cstate, &fsp->fr_stateid); 7149 } 7150 7151 void 7152 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) 7153 { 7154 get_stateid(cstate, &setattr->sa_stateid); 7155 } 7156 7157 void 7158 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 7159 { 7160 get_stateid(cstate, &close->cl_stateid); 7161 } 7162 7163 void 7164 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku) 7165 { 7166 get_stateid(cstate, &locku->lu_stateid); 7167 } 7168 7169 void 7170 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read) 7171 { 7172 get_stateid(cstate, &read->rd_stateid); 7173 } 7174 7175 void 7176 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write) 7177 { 7178 get_stateid(cstate, &write->wr_stateid); 7179 } 7180