1 /* 2 * Copyright (c) 2001 The Regents of the University of Michigan. 3 * All rights reserved. 4 * 5 * Kendrick Smith <kmsmith@umich.edu> 6 * Andy Adamson <kandros@umich.edu> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Neither the name of the University nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 #include <linux/file.h> 36 #include <linux/fs.h> 37 #include <linux/slab.h> 38 #include <linux/namei.h> 39 #include <linux/swap.h> 40 #include <linux/pagemap.h> 41 #include <linux/ratelimit.h> 42 #include <linux/sunrpc/svcauth_gss.h> 43 #include <linux/sunrpc/addr.h> 44 #include <linux/jhash.h> 45 #include "xdr4.h" 46 #include "xdr4cb.h" 47 #include "vfs.h" 48 #include "current_stateid.h" 49 50 #include "netns.h" 51 #include "pnfs.h" 52 53 #define NFSDDBG_FACILITY NFSDDBG_PROC 54 55 #define all_ones {{~0,~0},~0} 56 static const stateid_t one_stateid = { 57 .si_generation = ~0, 58 .si_opaque = all_ones, 59 }; 60 static const stateid_t zero_stateid = { 61 /* all fields zero */ 62 }; 63 static const stateid_t currentstateid = { 64 .si_generation = 1, 65 }; 66 67 static u64 current_sessionid = 1; 68 69 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) 70 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) 71 #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t))) 72 73 /* forward declarations */ 74 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); 75 static void nfs4_free_ol_stateid(struct nfs4_stid *stid); 76 77 /* Locking: */ 78 79 /* 80 * Currently used for the del_recall_lru and file hash table. In an 81 * effort to decrease the scope of the client_mutex, this spinlock may 82 * eventually cover more: 83 */ 84 static DEFINE_SPINLOCK(state_lock); 85 86 /* 87 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for 88 * the refcount on the open stateid to drop. 89 */ 90 static DECLARE_WAIT_QUEUE_HEAD(close_wq); 91 92 static struct kmem_cache *openowner_slab; 93 static struct kmem_cache *lockowner_slab; 94 static struct kmem_cache *file_slab; 95 static struct kmem_cache *stateid_slab; 96 static struct kmem_cache *deleg_slab; 97 static struct kmem_cache *odstate_slab; 98 99 static void free_session(struct nfsd4_session *); 100 101 static struct nfsd4_callback_ops nfsd4_cb_recall_ops; 102 103 static bool is_session_dead(struct nfsd4_session *ses) 104 { 105 return ses->se_flags & NFS4_SESSION_DEAD; 106 } 107 108 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) 109 { 110 if (atomic_read(&ses->se_ref) > ref_held_by_me) 111 return nfserr_jukebox; 112 ses->se_flags |= NFS4_SESSION_DEAD; 113 return nfs_ok; 114 } 115 116 static bool is_client_expired(struct nfs4_client *clp) 117 { 118 return clp->cl_time == 0; 119 } 120 121 static __be32 get_client_locked(struct nfs4_client *clp) 122 { 123 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 124 125 lockdep_assert_held(&nn->client_lock); 126 127 if (is_client_expired(clp)) 128 return nfserr_expired; 129 atomic_inc(&clp->cl_refcount); 130 return nfs_ok; 131 } 132 133 /* must be called under the client_lock */ 134 static inline void 135 renew_client_locked(struct nfs4_client *clp) 136 { 137 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 138 139 if (is_client_expired(clp)) { 140 WARN_ON(1); 141 printk("%s: client (clientid %08x/%08x) already expired\n", 142 __func__, 143 clp->cl_clientid.cl_boot, 144 clp->cl_clientid.cl_id); 145 return; 146 } 147 148 dprintk("renewing client (clientid %08x/%08x)\n", 149 clp->cl_clientid.cl_boot, 150 clp->cl_clientid.cl_id); 151 list_move_tail(&clp->cl_lru, &nn->client_lru); 152 clp->cl_time = get_seconds(); 153 } 154 155 static void put_client_renew_locked(struct nfs4_client *clp) 156 { 157 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 158 159 lockdep_assert_held(&nn->client_lock); 160 161 if (!atomic_dec_and_test(&clp->cl_refcount)) 162 return; 163 if (!is_client_expired(clp)) 164 renew_client_locked(clp); 165 } 166 167 static void put_client_renew(struct nfs4_client *clp) 168 { 169 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 170 171 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock)) 172 return; 173 if (!is_client_expired(clp)) 174 renew_client_locked(clp); 175 spin_unlock(&nn->client_lock); 176 } 177 178 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) 179 { 180 __be32 status; 181 182 if (is_session_dead(ses)) 183 return nfserr_badsession; 184 status = get_client_locked(ses->se_client); 185 if (status) 186 return status; 187 atomic_inc(&ses->se_ref); 188 return nfs_ok; 189 } 190 191 static void nfsd4_put_session_locked(struct nfsd4_session *ses) 192 { 193 struct nfs4_client *clp = ses->se_client; 194 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 195 196 lockdep_assert_held(&nn->client_lock); 197 198 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) 199 free_session(ses); 200 put_client_renew_locked(clp); 201 } 202 203 static void nfsd4_put_session(struct nfsd4_session *ses) 204 { 205 struct nfs4_client *clp = ses->se_client; 206 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 207 208 spin_lock(&nn->client_lock); 209 nfsd4_put_session_locked(ses); 210 spin_unlock(&nn->client_lock); 211 } 212 213 static inline struct nfs4_stateowner * 214 nfs4_get_stateowner(struct nfs4_stateowner *sop) 215 { 216 atomic_inc(&sop->so_count); 217 return sop; 218 } 219 220 static int 221 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) 222 { 223 return (sop->so_owner.len == owner->len) && 224 0 == memcmp(sop->so_owner.data, owner->data, owner->len); 225 } 226 227 static struct nfs4_openowner * 228 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open, 229 struct nfs4_client *clp) 230 { 231 struct nfs4_stateowner *so; 232 233 lockdep_assert_held(&clp->cl_lock); 234 235 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], 236 so_strhash) { 237 if (!so->so_is_open_owner) 238 continue; 239 if (same_owner_str(so, &open->op_owner)) 240 return openowner(nfs4_get_stateowner(so)); 241 } 242 return NULL; 243 } 244 245 static struct nfs4_openowner * 246 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, 247 struct nfs4_client *clp) 248 { 249 struct nfs4_openowner *oo; 250 251 spin_lock(&clp->cl_lock); 252 oo = find_openstateowner_str_locked(hashval, open, clp); 253 spin_unlock(&clp->cl_lock); 254 return oo; 255 } 256 257 static inline u32 258 opaque_hashval(const void *ptr, int nbytes) 259 { 260 unsigned char *cptr = (unsigned char *) ptr; 261 262 u32 x = 0; 263 while (nbytes--) { 264 x *= 37; 265 x += *cptr++; 266 } 267 return x; 268 } 269 270 static void nfsd4_free_file_rcu(struct rcu_head *rcu) 271 { 272 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu); 273 274 kmem_cache_free(file_slab, fp); 275 } 276 277 void 278 put_nfs4_file(struct nfs4_file *fi) 279 { 280 might_lock(&state_lock); 281 282 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { 283 hlist_del_rcu(&fi->fi_hash); 284 spin_unlock(&state_lock); 285 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); 286 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); 287 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); 288 } 289 } 290 291 static struct file * 292 __nfs4_get_fd(struct nfs4_file *f, int oflag) 293 { 294 if (f->fi_fds[oflag]) 295 return get_file(f->fi_fds[oflag]); 296 return NULL; 297 } 298 299 static struct file * 300 find_writeable_file_locked(struct nfs4_file *f) 301 { 302 struct file *ret; 303 304 lockdep_assert_held(&f->fi_lock); 305 306 ret = __nfs4_get_fd(f, O_WRONLY); 307 if (!ret) 308 ret = __nfs4_get_fd(f, O_RDWR); 309 return ret; 310 } 311 312 static struct file * 313 find_writeable_file(struct nfs4_file *f) 314 { 315 struct file *ret; 316 317 spin_lock(&f->fi_lock); 318 ret = find_writeable_file_locked(f); 319 spin_unlock(&f->fi_lock); 320 321 return ret; 322 } 323 324 static struct file *find_readable_file_locked(struct nfs4_file *f) 325 { 326 struct file *ret; 327 328 lockdep_assert_held(&f->fi_lock); 329 330 ret = __nfs4_get_fd(f, O_RDONLY); 331 if (!ret) 332 ret = __nfs4_get_fd(f, O_RDWR); 333 return ret; 334 } 335 336 static struct file * 337 find_readable_file(struct nfs4_file *f) 338 { 339 struct file *ret; 340 341 spin_lock(&f->fi_lock); 342 ret = find_readable_file_locked(f); 343 spin_unlock(&f->fi_lock); 344 345 return ret; 346 } 347 348 struct file * 349 find_any_file(struct nfs4_file *f) 350 { 351 struct file *ret; 352 353 spin_lock(&f->fi_lock); 354 ret = __nfs4_get_fd(f, O_RDWR); 355 if (!ret) { 356 ret = __nfs4_get_fd(f, O_WRONLY); 357 if (!ret) 358 ret = __nfs4_get_fd(f, O_RDONLY); 359 } 360 spin_unlock(&f->fi_lock); 361 return ret; 362 } 363 364 static atomic_long_t num_delegations; 365 unsigned long max_delegations; 366 367 /* 368 * Open owner state (share locks) 369 */ 370 371 /* hash tables for lock and open owners */ 372 #define OWNER_HASH_BITS 8 373 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 374 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 375 376 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) 377 { 378 unsigned int ret; 379 380 ret = opaque_hashval(ownername->data, ownername->len); 381 return ret & OWNER_HASH_MASK; 382 } 383 384 /* hash table for nfs4_file */ 385 #define FILE_HASH_BITS 8 386 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) 387 388 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh) 389 { 390 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0); 391 } 392 393 static unsigned int file_hashval(struct knfsd_fh *fh) 394 { 395 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1); 396 } 397 398 static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; 399 400 static void 401 __nfs4_file_get_access(struct nfs4_file *fp, u32 access) 402 { 403 lockdep_assert_held(&fp->fi_lock); 404 405 if (access & NFS4_SHARE_ACCESS_WRITE) 406 atomic_inc(&fp->fi_access[O_WRONLY]); 407 if (access & NFS4_SHARE_ACCESS_READ) 408 atomic_inc(&fp->fi_access[O_RDONLY]); 409 } 410 411 static __be32 412 nfs4_file_get_access(struct nfs4_file *fp, u32 access) 413 { 414 lockdep_assert_held(&fp->fi_lock); 415 416 /* Does this access mode make sense? */ 417 if (access & ~NFS4_SHARE_ACCESS_BOTH) 418 return nfserr_inval; 419 420 /* Does it conflict with a deny mode already set? */ 421 if ((access & fp->fi_share_deny) != 0) 422 return nfserr_share_denied; 423 424 __nfs4_file_get_access(fp, access); 425 return nfs_ok; 426 } 427 428 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) 429 { 430 /* Common case is that there is no deny mode. */ 431 if (deny) { 432 /* Does this deny mode make sense? */ 433 if (deny & ~NFS4_SHARE_DENY_BOTH) 434 return nfserr_inval; 435 436 if ((deny & NFS4_SHARE_DENY_READ) && 437 atomic_read(&fp->fi_access[O_RDONLY])) 438 return nfserr_share_denied; 439 440 if ((deny & NFS4_SHARE_DENY_WRITE) && 441 atomic_read(&fp->fi_access[O_WRONLY])) 442 return nfserr_share_denied; 443 } 444 return nfs_ok; 445 } 446 447 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 448 { 449 might_lock(&fp->fi_lock); 450 451 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { 452 struct file *f1 = NULL; 453 struct file *f2 = NULL; 454 455 swap(f1, fp->fi_fds[oflag]); 456 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) 457 swap(f2, fp->fi_fds[O_RDWR]); 458 spin_unlock(&fp->fi_lock); 459 if (f1) 460 fput(f1); 461 if (f2) 462 fput(f2); 463 } 464 } 465 466 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) 467 { 468 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); 469 470 if (access & NFS4_SHARE_ACCESS_WRITE) 471 __nfs4_file_put_access(fp, O_WRONLY); 472 if (access & NFS4_SHARE_ACCESS_READ) 473 __nfs4_file_put_access(fp, O_RDONLY); 474 } 475 476 /* 477 * Allocate a new open/delegation state counter. This is needed for 478 * pNFS for proper return on close semantics. 479 * 480 * Note that we only allocate it for pNFS-enabled exports, otherwise 481 * all pointers to struct nfs4_clnt_odstate are always NULL. 482 */ 483 static struct nfs4_clnt_odstate * 484 alloc_clnt_odstate(struct nfs4_client *clp) 485 { 486 struct nfs4_clnt_odstate *co; 487 488 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); 489 if (co) { 490 co->co_client = clp; 491 atomic_set(&co->co_odcount, 1); 492 } 493 return co; 494 } 495 496 static void 497 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) 498 { 499 struct nfs4_file *fp = co->co_file; 500 501 lockdep_assert_held(&fp->fi_lock); 502 list_add(&co->co_perfile, &fp->fi_clnt_odstate); 503 } 504 505 static inline void 506 get_clnt_odstate(struct nfs4_clnt_odstate *co) 507 { 508 if (co) 509 atomic_inc(&co->co_odcount); 510 } 511 512 static void 513 put_clnt_odstate(struct nfs4_clnt_odstate *co) 514 { 515 struct nfs4_file *fp; 516 517 if (!co) 518 return; 519 520 fp = co->co_file; 521 if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { 522 list_del(&co->co_perfile); 523 spin_unlock(&fp->fi_lock); 524 525 nfsd4_return_all_file_layouts(co->co_client, fp); 526 kmem_cache_free(odstate_slab, co); 527 } 528 } 529 530 static struct nfs4_clnt_odstate * 531 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) 532 { 533 struct nfs4_clnt_odstate *co; 534 struct nfs4_client *cl; 535 536 if (!new) 537 return NULL; 538 539 cl = new->co_client; 540 541 spin_lock(&fp->fi_lock); 542 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { 543 if (co->co_client == cl) { 544 get_clnt_odstate(co); 545 goto out; 546 } 547 } 548 co = new; 549 co->co_file = fp; 550 hash_clnt_odstate_locked(new); 551 out: 552 spin_unlock(&fp->fi_lock); 553 return co; 554 } 555 556 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, 557 struct kmem_cache *slab) 558 { 559 struct nfs4_stid *stid; 560 int new_id; 561 562 stid = kmem_cache_zalloc(slab, GFP_KERNEL); 563 if (!stid) 564 return NULL; 565 566 idr_preload(GFP_KERNEL); 567 spin_lock(&cl->cl_lock); 568 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT); 569 spin_unlock(&cl->cl_lock); 570 idr_preload_end(); 571 if (new_id < 0) 572 goto out_free; 573 stid->sc_client = cl; 574 stid->sc_stateid.si_opaque.so_id = new_id; 575 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; 576 /* Will be incremented before return to client: */ 577 atomic_set(&stid->sc_count, 1); 578 579 /* 580 * It shouldn't be a problem to reuse an opaque stateid value. 581 * I don't think it is for 4.1. But with 4.0 I worry that, for 582 * example, a stray write retransmission could be accepted by 583 * the server when it should have been rejected. Therefore, 584 * adopt a trick from the sctp code to attempt to maximize the 585 * amount of time until an id is reused, by ensuring they always 586 * "increase" (mod INT_MAX): 587 */ 588 return stid; 589 out_free: 590 kmem_cache_free(slab, stid); 591 return NULL; 592 } 593 594 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) 595 { 596 struct nfs4_stid *stid; 597 struct nfs4_ol_stateid *stp; 598 599 stid = nfs4_alloc_stid(clp, stateid_slab); 600 if (!stid) 601 return NULL; 602 603 stp = openlockstateid(stid); 604 stp->st_stid.sc_free = nfs4_free_ol_stateid; 605 return stp; 606 } 607 608 static void nfs4_free_deleg(struct nfs4_stid *stid) 609 { 610 kmem_cache_free(deleg_slab, stid); 611 atomic_long_dec(&num_delegations); 612 } 613 614 /* 615 * When we recall a delegation, we should be careful not to hand it 616 * out again straight away. 617 * To ensure this we keep a pair of bloom filters ('new' and 'old') 618 * in which the filehandles of recalled delegations are "stored". 619 * If a filehandle appear in either filter, a delegation is blocked. 620 * When a delegation is recalled, the filehandle is stored in the "new" 621 * filter. 622 * Every 30 seconds we swap the filters and clear the "new" one, 623 * unless both are empty of course. 624 * 625 * Each filter is 256 bits. We hash the filehandle to 32bit and use the 626 * low 3 bytes as hash-table indices. 627 * 628 * 'blocked_delegations_lock', which is always taken in block_delegations(), 629 * is used to manage concurrent access. Testing does not need the lock 630 * except when swapping the two filters. 631 */ 632 static DEFINE_SPINLOCK(blocked_delegations_lock); 633 static struct bloom_pair { 634 int entries, old_entries; 635 time_t swap_time; 636 int new; /* index into 'set' */ 637 DECLARE_BITMAP(set[2], 256); 638 } blocked_delegations; 639 640 static int delegation_blocked(struct knfsd_fh *fh) 641 { 642 u32 hash; 643 struct bloom_pair *bd = &blocked_delegations; 644 645 if (bd->entries == 0) 646 return 0; 647 if (seconds_since_boot() - bd->swap_time > 30) { 648 spin_lock(&blocked_delegations_lock); 649 if (seconds_since_boot() - bd->swap_time > 30) { 650 bd->entries -= bd->old_entries; 651 bd->old_entries = bd->entries; 652 memset(bd->set[bd->new], 0, 653 sizeof(bd->set[0])); 654 bd->new = 1-bd->new; 655 bd->swap_time = seconds_since_boot(); 656 } 657 spin_unlock(&blocked_delegations_lock); 658 } 659 hash = jhash(&fh->fh_base, fh->fh_size, 0); 660 if (test_bit(hash&255, bd->set[0]) && 661 test_bit((hash>>8)&255, bd->set[0]) && 662 test_bit((hash>>16)&255, bd->set[0])) 663 return 1; 664 665 if (test_bit(hash&255, bd->set[1]) && 666 test_bit((hash>>8)&255, bd->set[1]) && 667 test_bit((hash>>16)&255, bd->set[1])) 668 return 1; 669 670 return 0; 671 } 672 673 static void block_delegations(struct knfsd_fh *fh) 674 { 675 u32 hash; 676 struct bloom_pair *bd = &blocked_delegations; 677 678 hash = jhash(&fh->fh_base, fh->fh_size, 0); 679 680 spin_lock(&blocked_delegations_lock); 681 __set_bit(hash&255, bd->set[bd->new]); 682 __set_bit((hash>>8)&255, bd->set[bd->new]); 683 __set_bit((hash>>16)&255, bd->set[bd->new]); 684 if (bd->entries == 0) 685 bd->swap_time = seconds_since_boot(); 686 bd->entries += 1; 687 spin_unlock(&blocked_delegations_lock); 688 } 689 690 static struct nfs4_delegation * 691 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, 692 struct nfs4_clnt_odstate *odstate) 693 { 694 struct nfs4_delegation *dp; 695 long n; 696 697 dprintk("NFSD alloc_init_deleg\n"); 698 n = atomic_long_inc_return(&num_delegations); 699 if (n < 0 || n > max_delegations) 700 goto out_dec; 701 if (delegation_blocked(¤t_fh->fh_handle)) 702 goto out_dec; 703 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); 704 if (dp == NULL) 705 goto out_dec; 706 707 dp->dl_stid.sc_free = nfs4_free_deleg; 708 /* 709 * delegation seqid's are never incremented. The 4.1 special 710 * meaning of seqid 0 isn't meaningful, really, but let's avoid 711 * 0 anyway just for consistency and use 1: 712 */ 713 dp->dl_stid.sc_stateid.si_generation = 1; 714 INIT_LIST_HEAD(&dp->dl_perfile); 715 INIT_LIST_HEAD(&dp->dl_perclnt); 716 INIT_LIST_HEAD(&dp->dl_recall_lru); 717 dp->dl_clnt_odstate = odstate; 718 get_clnt_odstate(odstate); 719 dp->dl_type = NFS4_OPEN_DELEGATE_READ; 720 dp->dl_retries = 1; 721 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, 722 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL); 723 return dp; 724 out_dec: 725 atomic_long_dec(&num_delegations); 726 return NULL; 727 } 728 729 void 730 nfs4_put_stid(struct nfs4_stid *s) 731 { 732 struct nfs4_file *fp = s->sc_file; 733 struct nfs4_client *clp = s->sc_client; 734 735 might_lock(&clp->cl_lock); 736 737 if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) { 738 wake_up_all(&close_wq); 739 return; 740 } 741 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 742 spin_unlock(&clp->cl_lock); 743 s->sc_free(s); 744 if (fp) 745 put_nfs4_file(fp); 746 } 747 748 static void nfs4_put_deleg_lease(struct nfs4_file *fp) 749 { 750 struct file *filp = NULL; 751 752 spin_lock(&fp->fi_lock); 753 if (fp->fi_deleg_file && --fp->fi_delegees == 0) 754 swap(filp, fp->fi_deleg_file); 755 spin_unlock(&fp->fi_lock); 756 757 if (filp) { 758 vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp); 759 fput(filp); 760 } 761 } 762 763 void nfs4_unhash_stid(struct nfs4_stid *s) 764 { 765 s->sc_type = 0; 766 } 767 768 static void 769 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) 770 { 771 lockdep_assert_held(&state_lock); 772 lockdep_assert_held(&fp->fi_lock); 773 774 atomic_inc(&dp->dl_stid.sc_count); 775 dp->dl_stid.sc_type = NFS4_DELEG_STID; 776 list_add(&dp->dl_perfile, &fp->fi_delegations); 777 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); 778 } 779 780 static void 781 unhash_delegation_locked(struct nfs4_delegation *dp) 782 { 783 struct nfs4_file *fp = dp->dl_stid.sc_file; 784 785 lockdep_assert_held(&state_lock); 786 787 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; 788 /* Ensure that deleg break won't try to requeue it */ 789 ++dp->dl_time; 790 spin_lock(&fp->fi_lock); 791 list_del_init(&dp->dl_perclnt); 792 list_del_init(&dp->dl_recall_lru); 793 list_del_init(&dp->dl_perfile); 794 spin_unlock(&fp->fi_lock); 795 } 796 797 static void destroy_delegation(struct nfs4_delegation *dp) 798 { 799 spin_lock(&state_lock); 800 unhash_delegation_locked(dp); 801 spin_unlock(&state_lock); 802 put_clnt_odstate(dp->dl_clnt_odstate); 803 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 804 nfs4_put_stid(&dp->dl_stid); 805 } 806 807 static void revoke_delegation(struct nfs4_delegation *dp) 808 { 809 struct nfs4_client *clp = dp->dl_stid.sc_client; 810 811 WARN_ON(!list_empty(&dp->dl_recall_lru)); 812 813 put_clnt_odstate(dp->dl_clnt_odstate); 814 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 815 816 if (clp->cl_minorversion == 0) 817 nfs4_put_stid(&dp->dl_stid); 818 else { 819 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; 820 spin_lock(&clp->cl_lock); 821 list_add(&dp->dl_recall_lru, &clp->cl_revoked); 822 spin_unlock(&clp->cl_lock); 823 } 824 } 825 826 /* 827 * SETCLIENTID state 828 */ 829 830 static unsigned int clientid_hashval(u32 id) 831 { 832 return id & CLIENT_HASH_MASK; 833 } 834 835 static unsigned int clientstr_hashval(const char *name) 836 { 837 return opaque_hashval(name, 8) & CLIENT_HASH_MASK; 838 } 839 840 /* 841 * We store the NONE, READ, WRITE, and BOTH bits separately in the 842 * st_{access,deny}_bmap field of the stateid, in order to track not 843 * only what share bits are currently in force, but also what 844 * combinations of share bits previous opens have used. This allows us 845 * to enforce the recommendation of rfc 3530 14.2.19 that the server 846 * return an error if the client attempt to downgrade to a combination 847 * of share bits not explicable by closing some of its previous opens. 848 * 849 * XXX: This enforcement is actually incomplete, since we don't keep 850 * track of access/deny bit combinations; so, e.g., we allow: 851 * 852 * OPEN allow read, deny write 853 * OPEN allow both, deny none 854 * DOWNGRADE allow read, deny none 855 * 856 * which we should reject. 857 */ 858 static unsigned int 859 bmap_to_share_mode(unsigned long bmap) { 860 int i; 861 unsigned int access = 0; 862 863 for (i = 1; i < 4; i++) { 864 if (test_bit(i, &bmap)) 865 access |= i; 866 } 867 return access; 868 } 869 870 /* set share access for a given stateid */ 871 static inline void 872 set_access(u32 access, struct nfs4_ol_stateid *stp) 873 { 874 unsigned char mask = 1 << access; 875 876 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 877 stp->st_access_bmap |= mask; 878 } 879 880 /* clear share access for a given stateid */ 881 static inline void 882 clear_access(u32 access, struct nfs4_ol_stateid *stp) 883 { 884 unsigned char mask = 1 << access; 885 886 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 887 stp->st_access_bmap &= ~mask; 888 } 889 890 /* test whether a given stateid has access */ 891 static inline bool 892 test_access(u32 access, struct nfs4_ol_stateid *stp) 893 { 894 unsigned char mask = 1 << access; 895 896 return (bool)(stp->st_access_bmap & mask); 897 } 898 899 /* set share deny for a given stateid */ 900 static inline void 901 set_deny(u32 deny, struct nfs4_ol_stateid *stp) 902 { 903 unsigned char mask = 1 << deny; 904 905 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 906 stp->st_deny_bmap |= mask; 907 } 908 909 /* clear share deny for a given stateid */ 910 static inline void 911 clear_deny(u32 deny, struct nfs4_ol_stateid *stp) 912 { 913 unsigned char mask = 1 << deny; 914 915 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 916 stp->st_deny_bmap &= ~mask; 917 } 918 919 /* test whether a given stateid is denying specific access */ 920 static inline bool 921 test_deny(u32 deny, struct nfs4_ol_stateid *stp) 922 { 923 unsigned char mask = 1 << deny; 924 925 return (bool)(stp->st_deny_bmap & mask); 926 } 927 928 static int nfs4_access_to_omode(u32 access) 929 { 930 switch (access & NFS4_SHARE_ACCESS_BOTH) { 931 case NFS4_SHARE_ACCESS_READ: 932 return O_RDONLY; 933 case NFS4_SHARE_ACCESS_WRITE: 934 return O_WRONLY; 935 case NFS4_SHARE_ACCESS_BOTH: 936 return O_RDWR; 937 } 938 WARN_ON_ONCE(1); 939 return O_RDONLY; 940 } 941 942 /* 943 * A stateid that had a deny mode associated with it is being released 944 * or downgraded. Recalculate the deny mode on the file. 945 */ 946 static void 947 recalculate_deny_mode(struct nfs4_file *fp) 948 { 949 struct nfs4_ol_stateid *stp; 950 951 spin_lock(&fp->fi_lock); 952 fp->fi_share_deny = 0; 953 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) 954 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); 955 spin_unlock(&fp->fi_lock); 956 } 957 958 static void 959 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) 960 { 961 int i; 962 bool change = false; 963 964 for (i = 1; i < 4; i++) { 965 if ((i & deny) != i) { 966 change = true; 967 clear_deny(i, stp); 968 } 969 } 970 971 /* Recalculate per-file deny mode if there was a change */ 972 if (change) 973 recalculate_deny_mode(stp->st_stid.sc_file); 974 } 975 976 /* release all access and file references for a given stateid */ 977 static void 978 release_all_access(struct nfs4_ol_stateid *stp) 979 { 980 int i; 981 struct nfs4_file *fp = stp->st_stid.sc_file; 982 983 if (fp && stp->st_deny_bmap != 0) 984 recalculate_deny_mode(fp); 985 986 for (i = 1; i < 4; i++) { 987 if (test_access(i, stp)) 988 nfs4_file_put_access(stp->st_stid.sc_file, i); 989 clear_access(i, stp); 990 } 991 } 992 993 static void nfs4_put_stateowner(struct nfs4_stateowner *sop) 994 { 995 struct nfs4_client *clp = sop->so_client; 996 997 might_lock(&clp->cl_lock); 998 999 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) 1000 return; 1001 sop->so_ops->so_unhash(sop); 1002 spin_unlock(&clp->cl_lock); 1003 kfree(sop->so_owner.data); 1004 sop->so_ops->so_free(sop); 1005 } 1006 1007 static void unhash_ol_stateid(struct nfs4_ol_stateid *stp) 1008 { 1009 struct nfs4_file *fp = stp->st_stid.sc_file; 1010 1011 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); 1012 1013 spin_lock(&fp->fi_lock); 1014 list_del(&stp->st_perfile); 1015 spin_unlock(&fp->fi_lock); 1016 list_del(&stp->st_perstateowner); 1017 } 1018 1019 static void nfs4_free_ol_stateid(struct nfs4_stid *stid) 1020 { 1021 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1022 1023 put_clnt_odstate(stp->st_clnt_odstate); 1024 release_all_access(stp); 1025 if (stp->st_stateowner) 1026 nfs4_put_stateowner(stp->st_stateowner); 1027 kmem_cache_free(stateid_slab, stid); 1028 } 1029 1030 static void nfs4_free_lock_stateid(struct nfs4_stid *stid) 1031 { 1032 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1033 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); 1034 struct file *file; 1035 1036 file = find_any_file(stp->st_stid.sc_file); 1037 if (file) 1038 filp_close(file, (fl_owner_t)lo); 1039 nfs4_free_ol_stateid(stid); 1040 } 1041 1042 /* 1043 * Put the persistent reference to an already unhashed generic stateid, while 1044 * holding the cl_lock. If it's the last reference, then put it onto the 1045 * reaplist for later destruction. 1046 */ 1047 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, 1048 struct list_head *reaplist) 1049 { 1050 struct nfs4_stid *s = &stp->st_stid; 1051 struct nfs4_client *clp = s->sc_client; 1052 1053 lockdep_assert_held(&clp->cl_lock); 1054 1055 WARN_ON_ONCE(!list_empty(&stp->st_locks)); 1056 1057 if (!atomic_dec_and_test(&s->sc_count)) { 1058 wake_up_all(&close_wq); 1059 return; 1060 } 1061 1062 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1063 list_add(&stp->st_locks, reaplist); 1064 } 1065 1066 static void unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1067 { 1068 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); 1069 1070 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); 1071 1072 list_del_init(&stp->st_locks); 1073 unhash_ol_stateid(stp); 1074 nfs4_unhash_stid(&stp->st_stid); 1075 } 1076 1077 static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1078 { 1079 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); 1080 1081 spin_lock(&oo->oo_owner.so_client->cl_lock); 1082 unhash_lock_stateid(stp); 1083 spin_unlock(&oo->oo_owner.so_client->cl_lock); 1084 nfs4_put_stid(&stp->st_stid); 1085 } 1086 1087 static void unhash_lockowner_locked(struct nfs4_lockowner *lo) 1088 { 1089 struct nfs4_client *clp = lo->lo_owner.so_client; 1090 1091 lockdep_assert_held(&clp->cl_lock); 1092 1093 list_del_init(&lo->lo_owner.so_strhash); 1094 } 1095 1096 /* 1097 * Free a list of generic stateids that were collected earlier after being 1098 * fully unhashed. 1099 */ 1100 static void 1101 free_ol_stateid_reaplist(struct list_head *reaplist) 1102 { 1103 struct nfs4_ol_stateid *stp; 1104 struct nfs4_file *fp; 1105 1106 might_sleep(); 1107 1108 while (!list_empty(reaplist)) { 1109 stp = list_first_entry(reaplist, struct nfs4_ol_stateid, 1110 st_locks); 1111 list_del(&stp->st_locks); 1112 fp = stp->st_stid.sc_file; 1113 stp->st_stid.sc_free(&stp->st_stid); 1114 if (fp) 1115 put_nfs4_file(fp); 1116 } 1117 } 1118 1119 static void release_lockowner(struct nfs4_lockowner *lo) 1120 { 1121 struct nfs4_client *clp = lo->lo_owner.so_client; 1122 struct nfs4_ol_stateid *stp; 1123 struct list_head reaplist; 1124 1125 INIT_LIST_HEAD(&reaplist); 1126 1127 spin_lock(&clp->cl_lock); 1128 unhash_lockowner_locked(lo); 1129 while (!list_empty(&lo->lo_owner.so_stateids)) { 1130 stp = list_first_entry(&lo->lo_owner.so_stateids, 1131 struct nfs4_ol_stateid, st_perstateowner); 1132 unhash_lock_stateid(stp); 1133 put_ol_stateid_locked(stp, &reaplist); 1134 } 1135 spin_unlock(&clp->cl_lock); 1136 free_ol_stateid_reaplist(&reaplist); 1137 nfs4_put_stateowner(&lo->lo_owner); 1138 } 1139 1140 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, 1141 struct list_head *reaplist) 1142 { 1143 struct nfs4_ol_stateid *stp; 1144 1145 while (!list_empty(&open_stp->st_locks)) { 1146 stp = list_entry(open_stp->st_locks.next, 1147 struct nfs4_ol_stateid, st_locks); 1148 unhash_lock_stateid(stp); 1149 put_ol_stateid_locked(stp, reaplist); 1150 } 1151 } 1152 1153 static void unhash_open_stateid(struct nfs4_ol_stateid *stp, 1154 struct list_head *reaplist) 1155 { 1156 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1157 1158 unhash_ol_stateid(stp); 1159 release_open_stateid_locks(stp, reaplist); 1160 } 1161 1162 static void release_open_stateid(struct nfs4_ol_stateid *stp) 1163 { 1164 LIST_HEAD(reaplist); 1165 1166 spin_lock(&stp->st_stid.sc_client->cl_lock); 1167 unhash_open_stateid(stp, &reaplist); 1168 put_ol_stateid_locked(stp, &reaplist); 1169 spin_unlock(&stp->st_stid.sc_client->cl_lock); 1170 free_ol_stateid_reaplist(&reaplist); 1171 } 1172 1173 static void unhash_openowner_locked(struct nfs4_openowner *oo) 1174 { 1175 struct nfs4_client *clp = oo->oo_owner.so_client; 1176 1177 lockdep_assert_held(&clp->cl_lock); 1178 1179 list_del_init(&oo->oo_owner.so_strhash); 1180 list_del_init(&oo->oo_perclient); 1181 } 1182 1183 static void release_last_closed_stateid(struct nfs4_openowner *oo) 1184 { 1185 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, 1186 nfsd_net_id); 1187 struct nfs4_ol_stateid *s; 1188 1189 spin_lock(&nn->client_lock); 1190 s = oo->oo_last_closed_stid; 1191 if (s) { 1192 list_del_init(&oo->oo_close_lru); 1193 oo->oo_last_closed_stid = NULL; 1194 } 1195 spin_unlock(&nn->client_lock); 1196 if (s) 1197 nfs4_put_stid(&s->st_stid); 1198 } 1199 1200 static void release_openowner(struct nfs4_openowner *oo) 1201 { 1202 struct nfs4_ol_stateid *stp; 1203 struct nfs4_client *clp = oo->oo_owner.so_client; 1204 struct list_head reaplist; 1205 1206 INIT_LIST_HEAD(&reaplist); 1207 1208 spin_lock(&clp->cl_lock); 1209 unhash_openowner_locked(oo); 1210 while (!list_empty(&oo->oo_owner.so_stateids)) { 1211 stp = list_first_entry(&oo->oo_owner.so_stateids, 1212 struct nfs4_ol_stateid, st_perstateowner); 1213 unhash_open_stateid(stp, &reaplist); 1214 put_ol_stateid_locked(stp, &reaplist); 1215 } 1216 spin_unlock(&clp->cl_lock); 1217 free_ol_stateid_reaplist(&reaplist); 1218 release_last_closed_stateid(oo); 1219 nfs4_put_stateowner(&oo->oo_owner); 1220 } 1221 1222 static inline int 1223 hash_sessionid(struct nfs4_sessionid *sessionid) 1224 { 1225 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; 1226 1227 return sid->sequence % SESSION_HASH_SIZE; 1228 } 1229 1230 #ifdef CONFIG_SUNRPC_DEBUG 1231 static inline void 1232 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1233 { 1234 u32 *ptr = (u32 *)(&sessionid->data[0]); 1235 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); 1236 } 1237 #else 1238 static inline void 1239 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1240 { 1241 } 1242 #endif 1243 1244 /* 1245 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it 1246 * won't be used for replay. 1247 */ 1248 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) 1249 { 1250 struct nfs4_stateowner *so = cstate->replay_owner; 1251 1252 if (nfserr == nfserr_replay_me) 1253 return; 1254 1255 if (!seqid_mutating_err(ntohl(nfserr))) { 1256 nfsd4_cstate_clear_replay(cstate); 1257 return; 1258 } 1259 if (!so) 1260 return; 1261 if (so->so_is_open_owner) 1262 release_last_closed_stateid(openowner(so)); 1263 so->so_seqid++; 1264 return; 1265 } 1266 1267 static void 1268 gen_sessionid(struct nfsd4_session *ses) 1269 { 1270 struct nfs4_client *clp = ses->se_client; 1271 struct nfsd4_sessionid *sid; 1272 1273 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; 1274 sid->clientid = clp->cl_clientid; 1275 sid->sequence = current_sessionid++; 1276 sid->reserved = 0; 1277 } 1278 1279 /* 1280 * The protocol defines ca_maxresponssize_cached to include the size of 1281 * the rpc header, but all we need to cache is the data starting after 1282 * the end of the initial SEQUENCE operation--the rest we regenerate 1283 * each time. Therefore we can advertise a ca_maxresponssize_cached 1284 * value that is the number of bytes in our cache plus a few additional 1285 * bytes. In order to stay on the safe side, and not promise more than 1286 * we can cache, those additional bytes must be the minimum possible: 24 1287 * bytes of rpc header (xid through accept state, with AUTH_NULL 1288 * verifier), 12 for the compound header (with zero-length tag), and 44 1289 * for the SEQUENCE op response: 1290 */ 1291 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) 1292 1293 static void 1294 free_session_slots(struct nfsd4_session *ses) 1295 { 1296 int i; 1297 1298 for (i = 0; i < ses->se_fchannel.maxreqs; i++) 1299 kfree(ses->se_slots[i]); 1300 } 1301 1302 /* 1303 * We don't actually need to cache the rpc and session headers, so we 1304 * can allocate a little less for each slot: 1305 */ 1306 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) 1307 { 1308 u32 size; 1309 1310 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) 1311 size = 0; 1312 else 1313 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 1314 return size + sizeof(struct nfsd4_slot); 1315 } 1316 1317 /* 1318 * XXX: If we run out of reserved DRC memory we could (up to a point) 1319 * re-negotiate active sessions and reduce their slot usage to make 1320 * room for new connections. For now we just fail the create session. 1321 */ 1322 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) 1323 { 1324 u32 slotsize = slot_bytes(ca); 1325 u32 num = ca->maxreqs; 1326 int avail; 1327 1328 spin_lock(&nfsd_drc_lock); 1329 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, 1330 nfsd_drc_max_mem - nfsd_drc_mem_used); 1331 num = min_t(int, num, avail / slotsize); 1332 nfsd_drc_mem_used += num * slotsize; 1333 spin_unlock(&nfsd_drc_lock); 1334 1335 return num; 1336 } 1337 1338 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) 1339 { 1340 int slotsize = slot_bytes(ca); 1341 1342 spin_lock(&nfsd_drc_lock); 1343 nfsd_drc_mem_used -= slotsize * ca->maxreqs; 1344 spin_unlock(&nfsd_drc_lock); 1345 } 1346 1347 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, 1348 struct nfsd4_channel_attrs *battrs) 1349 { 1350 int numslots = fattrs->maxreqs; 1351 int slotsize = slot_bytes(fattrs); 1352 struct nfsd4_session *new; 1353 int mem, i; 1354 1355 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) 1356 + sizeof(struct nfsd4_session) > PAGE_SIZE); 1357 mem = numslots * sizeof(struct nfsd4_slot *); 1358 1359 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); 1360 if (!new) 1361 return NULL; 1362 /* allocate each struct nfsd4_slot and data cache in one piece */ 1363 for (i = 0; i < numslots; i++) { 1364 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); 1365 if (!new->se_slots[i]) 1366 goto out_free; 1367 } 1368 1369 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); 1370 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); 1371 1372 return new; 1373 out_free: 1374 while (i--) 1375 kfree(new->se_slots[i]); 1376 kfree(new); 1377 return NULL; 1378 } 1379 1380 static void free_conn(struct nfsd4_conn *c) 1381 { 1382 svc_xprt_put(c->cn_xprt); 1383 kfree(c); 1384 } 1385 1386 static void nfsd4_conn_lost(struct svc_xpt_user *u) 1387 { 1388 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); 1389 struct nfs4_client *clp = c->cn_session->se_client; 1390 1391 spin_lock(&clp->cl_lock); 1392 if (!list_empty(&c->cn_persession)) { 1393 list_del(&c->cn_persession); 1394 free_conn(c); 1395 } 1396 nfsd4_probe_callback(clp); 1397 spin_unlock(&clp->cl_lock); 1398 } 1399 1400 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 1401 { 1402 struct nfsd4_conn *conn; 1403 1404 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 1405 if (!conn) 1406 return NULL; 1407 svc_xprt_get(rqstp->rq_xprt); 1408 conn->cn_xprt = rqstp->rq_xprt; 1409 conn->cn_flags = flags; 1410 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 1411 return conn; 1412 } 1413 1414 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 1415 { 1416 conn->cn_session = ses; 1417 list_add(&conn->cn_persession, &ses->se_conns); 1418 } 1419 1420 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 1421 { 1422 struct nfs4_client *clp = ses->se_client; 1423 1424 spin_lock(&clp->cl_lock); 1425 __nfsd4_hash_conn(conn, ses); 1426 spin_unlock(&clp->cl_lock); 1427 } 1428 1429 static int nfsd4_register_conn(struct nfsd4_conn *conn) 1430 { 1431 conn->cn_xpt_user.callback = nfsd4_conn_lost; 1432 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 1433 } 1434 1435 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) 1436 { 1437 int ret; 1438 1439 nfsd4_hash_conn(conn, ses); 1440 ret = nfsd4_register_conn(conn); 1441 if (ret) 1442 /* oops; xprt is already down: */ 1443 nfsd4_conn_lost(&conn->cn_xpt_user); 1444 /* We may have gained or lost a callback channel: */ 1445 nfsd4_probe_callback_sync(ses->se_client); 1446 } 1447 1448 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) 1449 { 1450 u32 dir = NFS4_CDFC4_FORE; 1451 1452 if (cses->flags & SESSION4_BACK_CHAN) 1453 dir |= NFS4_CDFC4_BACK; 1454 return alloc_conn(rqstp, dir); 1455 } 1456 1457 /* must be called under client_lock */ 1458 static void nfsd4_del_conns(struct nfsd4_session *s) 1459 { 1460 struct nfs4_client *clp = s->se_client; 1461 struct nfsd4_conn *c; 1462 1463 spin_lock(&clp->cl_lock); 1464 while (!list_empty(&s->se_conns)) { 1465 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); 1466 list_del_init(&c->cn_persession); 1467 spin_unlock(&clp->cl_lock); 1468 1469 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); 1470 free_conn(c); 1471 1472 spin_lock(&clp->cl_lock); 1473 } 1474 spin_unlock(&clp->cl_lock); 1475 } 1476 1477 static void __free_session(struct nfsd4_session *ses) 1478 { 1479 free_session_slots(ses); 1480 kfree(ses); 1481 } 1482 1483 static void free_session(struct nfsd4_session *ses) 1484 { 1485 nfsd4_del_conns(ses); 1486 nfsd4_put_drc_mem(&ses->se_fchannel); 1487 __free_session(ses); 1488 } 1489 1490 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) 1491 { 1492 int idx; 1493 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1494 1495 new->se_client = clp; 1496 gen_sessionid(new); 1497 1498 INIT_LIST_HEAD(&new->se_conns); 1499 1500 new->se_cb_seq_nr = 1; 1501 new->se_flags = cses->flags; 1502 new->se_cb_prog = cses->callback_prog; 1503 new->se_cb_sec = cses->cb_sec; 1504 atomic_set(&new->se_ref, 0); 1505 idx = hash_sessionid(&new->se_sessionid); 1506 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); 1507 spin_lock(&clp->cl_lock); 1508 list_add(&new->se_perclnt, &clp->cl_sessions); 1509 spin_unlock(&clp->cl_lock); 1510 1511 { 1512 struct sockaddr *sa = svc_addr(rqstp); 1513 /* 1514 * This is a little silly; with sessions there's no real 1515 * use for the callback address. Use the peer address 1516 * as a reasonable default for now, but consider fixing 1517 * the rpc client not to require an address in the 1518 * future: 1519 */ 1520 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 1521 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 1522 } 1523 } 1524 1525 /* caller must hold client_lock */ 1526 static struct nfsd4_session * 1527 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 1528 { 1529 struct nfsd4_session *elem; 1530 int idx; 1531 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1532 1533 lockdep_assert_held(&nn->client_lock); 1534 1535 dump_sessionid(__func__, sessionid); 1536 idx = hash_sessionid(sessionid); 1537 /* Search in the appropriate list */ 1538 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 1539 if (!memcmp(elem->se_sessionid.data, sessionid->data, 1540 NFS4_MAX_SESSIONID_LEN)) { 1541 return elem; 1542 } 1543 } 1544 1545 dprintk("%s: session not found\n", __func__); 1546 return NULL; 1547 } 1548 1549 static struct nfsd4_session * 1550 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 1551 __be32 *ret) 1552 { 1553 struct nfsd4_session *session; 1554 __be32 status = nfserr_badsession; 1555 1556 session = __find_in_sessionid_hashtbl(sessionid, net); 1557 if (!session) 1558 goto out; 1559 status = nfsd4_get_session_locked(session); 1560 if (status) 1561 session = NULL; 1562 out: 1563 *ret = status; 1564 return session; 1565 } 1566 1567 /* caller must hold client_lock */ 1568 static void 1569 unhash_session(struct nfsd4_session *ses) 1570 { 1571 struct nfs4_client *clp = ses->se_client; 1572 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1573 1574 lockdep_assert_held(&nn->client_lock); 1575 1576 list_del(&ses->se_hash); 1577 spin_lock(&ses->se_client->cl_lock); 1578 list_del(&ses->se_perclnt); 1579 spin_unlock(&ses->se_client->cl_lock); 1580 } 1581 1582 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1583 static int 1584 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 1585 { 1586 /* 1587 * We're assuming the clid was not given out from a boot 1588 * precisely 2^32 (about 136 years) before this one. That seems 1589 * a safe assumption: 1590 */ 1591 if (clid->cl_boot == (u32)nn->boot_time) 1592 return 0; 1593 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", 1594 clid->cl_boot, clid->cl_id, nn->boot_time); 1595 return 1; 1596 } 1597 1598 /* 1599 * XXX Should we use a slab cache ? 1600 * This type of memory management is somewhat inefficient, but we use it 1601 * anyway since SETCLIENTID is not a common operation. 1602 */ 1603 static struct nfs4_client *alloc_client(struct xdr_netobj name) 1604 { 1605 struct nfs4_client *clp; 1606 int i; 1607 1608 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); 1609 if (clp == NULL) 1610 return NULL; 1611 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); 1612 if (clp->cl_name.data == NULL) 1613 goto err_no_name; 1614 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) * 1615 OWNER_HASH_SIZE, GFP_KERNEL); 1616 if (!clp->cl_ownerstr_hashtbl) 1617 goto err_no_hashtbl; 1618 for (i = 0; i < OWNER_HASH_SIZE; i++) 1619 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 1620 clp->cl_name.len = name.len; 1621 INIT_LIST_HEAD(&clp->cl_sessions); 1622 idr_init(&clp->cl_stateids); 1623 atomic_set(&clp->cl_refcount, 0); 1624 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1625 INIT_LIST_HEAD(&clp->cl_idhash); 1626 INIT_LIST_HEAD(&clp->cl_openowners); 1627 INIT_LIST_HEAD(&clp->cl_delegations); 1628 INIT_LIST_HEAD(&clp->cl_lru); 1629 INIT_LIST_HEAD(&clp->cl_revoked); 1630 #ifdef CONFIG_NFSD_PNFS 1631 INIT_LIST_HEAD(&clp->cl_lo_states); 1632 #endif 1633 spin_lock_init(&clp->cl_lock); 1634 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1635 return clp; 1636 err_no_hashtbl: 1637 kfree(clp->cl_name.data); 1638 err_no_name: 1639 kfree(clp); 1640 return NULL; 1641 } 1642 1643 static void 1644 free_client(struct nfs4_client *clp) 1645 { 1646 while (!list_empty(&clp->cl_sessions)) { 1647 struct nfsd4_session *ses; 1648 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1649 se_perclnt); 1650 list_del(&ses->se_perclnt); 1651 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 1652 free_session(ses); 1653 } 1654 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 1655 free_svc_cred(&clp->cl_cred); 1656 kfree(clp->cl_ownerstr_hashtbl); 1657 kfree(clp->cl_name.data); 1658 idr_destroy(&clp->cl_stateids); 1659 kfree(clp); 1660 } 1661 1662 /* must be called under the client_lock */ 1663 static void 1664 unhash_client_locked(struct nfs4_client *clp) 1665 { 1666 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1667 struct nfsd4_session *ses; 1668 1669 lockdep_assert_held(&nn->client_lock); 1670 1671 /* Mark the client as expired! */ 1672 clp->cl_time = 0; 1673 /* Make it invisible */ 1674 if (!list_empty(&clp->cl_idhash)) { 1675 list_del_init(&clp->cl_idhash); 1676 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 1677 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 1678 else 1679 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 1680 } 1681 list_del_init(&clp->cl_lru); 1682 spin_lock(&clp->cl_lock); 1683 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 1684 list_del_init(&ses->se_hash); 1685 spin_unlock(&clp->cl_lock); 1686 } 1687 1688 static void 1689 unhash_client(struct nfs4_client *clp) 1690 { 1691 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1692 1693 spin_lock(&nn->client_lock); 1694 unhash_client_locked(clp); 1695 spin_unlock(&nn->client_lock); 1696 } 1697 1698 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 1699 { 1700 if (atomic_read(&clp->cl_refcount)) 1701 return nfserr_jukebox; 1702 unhash_client_locked(clp); 1703 return nfs_ok; 1704 } 1705 1706 static void 1707 __destroy_client(struct nfs4_client *clp) 1708 { 1709 struct nfs4_openowner *oo; 1710 struct nfs4_delegation *dp; 1711 struct list_head reaplist; 1712 1713 INIT_LIST_HEAD(&reaplist); 1714 spin_lock(&state_lock); 1715 while (!list_empty(&clp->cl_delegations)) { 1716 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 1717 unhash_delegation_locked(dp); 1718 list_add(&dp->dl_recall_lru, &reaplist); 1719 } 1720 spin_unlock(&state_lock); 1721 while (!list_empty(&reaplist)) { 1722 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1723 list_del_init(&dp->dl_recall_lru); 1724 put_clnt_odstate(dp->dl_clnt_odstate); 1725 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 1726 nfs4_put_stid(&dp->dl_stid); 1727 } 1728 while (!list_empty(&clp->cl_revoked)) { 1729 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 1730 list_del_init(&dp->dl_recall_lru); 1731 nfs4_put_stid(&dp->dl_stid); 1732 } 1733 while (!list_empty(&clp->cl_openowners)) { 1734 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 1735 nfs4_get_stateowner(&oo->oo_owner); 1736 release_openowner(oo); 1737 } 1738 nfsd4_return_all_client_layouts(clp); 1739 nfsd4_shutdown_callback(clp); 1740 if (clp->cl_cb_conn.cb_xprt) 1741 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 1742 free_client(clp); 1743 } 1744 1745 static void 1746 destroy_client(struct nfs4_client *clp) 1747 { 1748 unhash_client(clp); 1749 __destroy_client(clp); 1750 } 1751 1752 static void expire_client(struct nfs4_client *clp) 1753 { 1754 unhash_client(clp); 1755 nfsd4_client_record_remove(clp); 1756 __destroy_client(clp); 1757 } 1758 1759 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 1760 { 1761 memcpy(target->cl_verifier.data, source->data, 1762 sizeof(target->cl_verifier.data)); 1763 } 1764 1765 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 1766 { 1767 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 1768 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 1769 } 1770 1771 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 1772 { 1773 if (source->cr_principal) { 1774 target->cr_principal = 1775 kstrdup(source->cr_principal, GFP_KERNEL); 1776 if (target->cr_principal == NULL) 1777 return -ENOMEM; 1778 } else 1779 target->cr_principal = NULL; 1780 target->cr_flavor = source->cr_flavor; 1781 target->cr_uid = source->cr_uid; 1782 target->cr_gid = source->cr_gid; 1783 target->cr_group_info = source->cr_group_info; 1784 get_group_info(target->cr_group_info); 1785 target->cr_gss_mech = source->cr_gss_mech; 1786 if (source->cr_gss_mech) 1787 gss_mech_get(source->cr_gss_mech); 1788 return 0; 1789 } 1790 1791 static int 1792 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 1793 { 1794 if (o1->len < o2->len) 1795 return -1; 1796 if (o1->len > o2->len) 1797 return 1; 1798 return memcmp(o1->data, o2->data, o1->len); 1799 } 1800 1801 static int same_name(const char *n1, const char *n2) 1802 { 1803 return 0 == memcmp(n1, n2, HEXDIR_LEN); 1804 } 1805 1806 static int 1807 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 1808 { 1809 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 1810 } 1811 1812 static int 1813 same_clid(clientid_t *cl1, clientid_t *cl2) 1814 { 1815 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 1816 } 1817 1818 static bool groups_equal(struct group_info *g1, struct group_info *g2) 1819 { 1820 int i; 1821 1822 if (g1->ngroups != g2->ngroups) 1823 return false; 1824 for (i=0; i<g1->ngroups; i++) 1825 if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i))) 1826 return false; 1827 return true; 1828 } 1829 1830 /* 1831 * RFC 3530 language requires clid_inuse be returned when the 1832 * "principal" associated with a requests differs from that previously 1833 * used. We use uid, gid's, and gss principal string as our best 1834 * approximation. We also don't want to allow non-gss use of a client 1835 * established using gss: in theory cr_principal should catch that 1836 * change, but in practice cr_principal can be null even in the gss case 1837 * since gssd doesn't always pass down a principal string. 1838 */ 1839 static bool is_gss_cred(struct svc_cred *cr) 1840 { 1841 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 1842 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 1843 } 1844 1845 1846 static bool 1847 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 1848 { 1849 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 1850 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) 1851 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) 1852 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 1853 return false; 1854 if (cr1->cr_principal == cr2->cr_principal) 1855 return true; 1856 if (!cr1->cr_principal || !cr2->cr_principal) 1857 return false; 1858 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 1859 } 1860 1861 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 1862 { 1863 struct svc_cred *cr = &rqstp->rq_cred; 1864 u32 service; 1865 1866 if (!cr->cr_gss_mech) 1867 return false; 1868 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 1869 return service == RPC_GSS_SVC_INTEGRITY || 1870 service == RPC_GSS_SVC_PRIVACY; 1871 } 1872 1873 static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 1874 { 1875 struct svc_cred *cr = &rqstp->rq_cred; 1876 1877 if (!cl->cl_mach_cred) 1878 return true; 1879 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 1880 return false; 1881 if (!svc_rqst_integrity_protected(rqstp)) 1882 return false; 1883 if (!cr->cr_principal) 1884 return false; 1885 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 1886 } 1887 1888 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 1889 { 1890 __be32 verf[2]; 1891 1892 /* 1893 * This is opaque to client, so no need to byte-swap. Use 1894 * __force to keep sparse happy 1895 */ 1896 verf[0] = (__force __be32)get_seconds(); 1897 verf[1] = (__force __be32)nn->clientid_counter; 1898 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 1899 } 1900 1901 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 1902 { 1903 clp->cl_clientid.cl_boot = nn->boot_time; 1904 clp->cl_clientid.cl_id = nn->clientid_counter++; 1905 gen_confirm(clp, nn); 1906 } 1907 1908 static struct nfs4_stid * 1909 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 1910 { 1911 struct nfs4_stid *ret; 1912 1913 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 1914 if (!ret || !ret->sc_type) 1915 return NULL; 1916 return ret; 1917 } 1918 1919 static struct nfs4_stid * 1920 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 1921 { 1922 struct nfs4_stid *s; 1923 1924 spin_lock(&cl->cl_lock); 1925 s = find_stateid_locked(cl, t); 1926 if (s != NULL) { 1927 if (typemask & s->sc_type) 1928 atomic_inc(&s->sc_count); 1929 else 1930 s = NULL; 1931 } 1932 spin_unlock(&cl->cl_lock); 1933 return s; 1934 } 1935 1936 static struct nfs4_client *create_client(struct xdr_netobj name, 1937 struct svc_rqst *rqstp, nfs4_verifier *verf) 1938 { 1939 struct nfs4_client *clp; 1940 struct sockaddr *sa = svc_addr(rqstp); 1941 int ret; 1942 struct net *net = SVC_NET(rqstp); 1943 1944 clp = alloc_client(name); 1945 if (clp == NULL) 1946 return NULL; 1947 1948 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 1949 if (ret) { 1950 free_client(clp); 1951 return NULL; 1952 } 1953 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 1954 clp->cl_time = get_seconds(); 1955 clear_bit(0, &clp->cl_cb_slot_busy); 1956 copy_verf(clp, verf); 1957 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1958 clp->cl_cb_session = NULL; 1959 clp->net = net; 1960 return clp; 1961 } 1962 1963 static void 1964 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 1965 { 1966 struct rb_node **new = &(root->rb_node), *parent = NULL; 1967 struct nfs4_client *clp; 1968 1969 while (*new) { 1970 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 1971 parent = *new; 1972 1973 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 1974 new = &((*new)->rb_left); 1975 else 1976 new = &((*new)->rb_right); 1977 } 1978 1979 rb_link_node(&new_clp->cl_namenode, parent, new); 1980 rb_insert_color(&new_clp->cl_namenode, root); 1981 } 1982 1983 static struct nfs4_client * 1984 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 1985 { 1986 int cmp; 1987 struct rb_node *node = root->rb_node; 1988 struct nfs4_client *clp; 1989 1990 while (node) { 1991 clp = rb_entry(node, struct nfs4_client, cl_namenode); 1992 cmp = compare_blob(&clp->cl_name, name); 1993 if (cmp > 0) 1994 node = node->rb_left; 1995 else if (cmp < 0) 1996 node = node->rb_right; 1997 else 1998 return clp; 1999 } 2000 return NULL; 2001 } 2002 2003 static void 2004 add_to_unconfirmed(struct nfs4_client *clp) 2005 { 2006 unsigned int idhashval; 2007 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2008 2009 lockdep_assert_held(&nn->client_lock); 2010 2011 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2012 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 2013 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2014 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 2015 renew_client_locked(clp); 2016 } 2017 2018 static void 2019 move_to_confirmed(struct nfs4_client *clp) 2020 { 2021 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2022 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2023 2024 lockdep_assert_held(&nn->client_lock); 2025 2026 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 2027 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 2028 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2029 add_clp_to_name_tree(clp, &nn->conf_name_tree); 2030 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2031 renew_client_locked(clp); 2032 } 2033 2034 static struct nfs4_client * 2035 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 2036 { 2037 struct nfs4_client *clp; 2038 unsigned int idhashval = clientid_hashval(clid->cl_id); 2039 2040 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 2041 if (same_clid(&clp->cl_clientid, clid)) { 2042 if ((bool)clp->cl_minorversion != sessions) 2043 return NULL; 2044 renew_client_locked(clp); 2045 return clp; 2046 } 2047 } 2048 return NULL; 2049 } 2050 2051 static struct nfs4_client * 2052 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2053 { 2054 struct list_head *tbl = nn->conf_id_hashtbl; 2055 2056 lockdep_assert_held(&nn->client_lock); 2057 return find_client_in_id_table(tbl, clid, sessions); 2058 } 2059 2060 static struct nfs4_client * 2061 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2062 { 2063 struct list_head *tbl = nn->unconf_id_hashtbl; 2064 2065 lockdep_assert_held(&nn->client_lock); 2066 return find_client_in_id_table(tbl, clid, sessions); 2067 } 2068 2069 static bool clp_used_exchangeid(struct nfs4_client *clp) 2070 { 2071 return clp->cl_exchange_flags != 0; 2072 } 2073 2074 static struct nfs4_client * 2075 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2076 { 2077 lockdep_assert_held(&nn->client_lock); 2078 return find_clp_in_name_tree(name, &nn->conf_name_tree); 2079 } 2080 2081 static struct nfs4_client * 2082 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2083 { 2084 lockdep_assert_held(&nn->client_lock); 2085 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 2086 } 2087 2088 static void 2089 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 2090 { 2091 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 2092 struct sockaddr *sa = svc_addr(rqstp); 2093 u32 scopeid = rpc_get_scope_id(sa); 2094 unsigned short expected_family; 2095 2096 /* Currently, we only support tcp and tcp6 for the callback channel */ 2097 if (se->se_callback_netid_len == 3 && 2098 !memcmp(se->se_callback_netid_val, "tcp", 3)) 2099 expected_family = AF_INET; 2100 else if (se->se_callback_netid_len == 4 && 2101 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 2102 expected_family = AF_INET6; 2103 else 2104 goto out_err; 2105 2106 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 2107 se->se_callback_addr_len, 2108 (struct sockaddr *)&conn->cb_addr, 2109 sizeof(conn->cb_addr)); 2110 2111 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 2112 goto out_err; 2113 2114 if (conn->cb_addr.ss_family == AF_INET6) 2115 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 2116 2117 conn->cb_prog = se->se_callback_prog; 2118 conn->cb_ident = se->se_callback_ident; 2119 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 2120 return; 2121 out_err: 2122 conn->cb_addr.ss_family = AF_UNSPEC; 2123 conn->cb_addrlen = 0; 2124 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " 2125 "will not receive delegations\n", 2126 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); 2127 2128 return; 2129 } 2130 2131 /* 2132 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 2133 */ 2134 static void 2135 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 2136 { 2137 struct xdr_buf *buf = resp->xdr.buf; 2138 struct nfsd4_slot *slot = resp->cstate.slot; 2139 unsigned int base; 2140 2141 dprintk("--> %s slot %p\n", __func__, slot); 2142 2143 slot->sl_opcnt = resp->opcnt; 2144 slot->sl_status = resp->cstate.status; 2145 2146 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 2147 if (nfsd4_not_cached(resp)) { 2148 slot->sl_datalen = 0; 2149 return; 2150 } 2151 base = resp->cstate.data_offset; 2152 slot->sl_datalen = buf->len - base; 2153 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 2154 WARN("%s: sessions DRC could not cache compound\n", __func__); 2155 return; 2156 } 2157 2158 /* 2159 * Encode the replay sequence operation from the slot values. 2160 * If cachethis is FALSE encode the uncached rep error on the next 2161 * operation which sets resp->p and increments resp->opcnt for 2162 * nfs4svc_encode_compoundres. 2163 * 2164 */ 2165 static __be32 2166 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 2167 struct nfsd4_compoundres *resp) 2168 { 2169 struct nfsd4_op *op; 2170 struct nfsd4_slot *slot = resp->cstate.slot; 2171 2172 /* Encode the replayed sequence operation */ 2173 op = &args->ops[resp->opcnt - 1]; 2174 nfsd4_encode_operation(resp, op); 2175 2176 /* Return nfserr_retry_uncached_rep in next operation. */ 2177 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { 2178 op = &args->ops[resp->opcnt++]; 2179 op->status = nfserr_retry_uncached_rep; 2180 nfsd4_encode_operation(resp, op); 2181 } 2182 return op->status; 2183 } 2184 2185 /* 2186 * The sequence operation is not cached because we can use the slot and 2187 * session values. 2188 */ 2189 static __be32 2190 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 2191 struct nfsd4_sequence *seq) 2192 { 2193 struct nfsd4_slot *slot = resp->cstate.slot; 2194 struct xdr_stream *xdr = &resp->xdr; 2195 __be32 *p; 2196 __be32 status; 2197 2198 dprintk("--> %s slot %p\n", __func__, slot); 2199 2200 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 2201 if (status) 2202 return status; 2203 2204 p = xdr_reserve_space(xdr, slot->sl_datalen); 2205 if (!p) { 2206 WARN_ON_ONCE(1); 2207 return nfserr_serverfault; 2208 } 2209 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 2210 xdr_commit_encode(xdr); 2211 2212 resp->opcnt = slot->sl_opcnt; 2213 return slot->sl_status; 2214 } 2215 2216 /* 2217 * Set the exchange_id flags returned by the server. 2218 */ 2219 static void 2220 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 2221 { 2222 #ifdef CONFIG_NFSD_PNFS 2223 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 2224 #else 2225 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 2226 #endif 2227 2228 /* Referrals are supported, Migration is not. */ 2229 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 2230 2231 /* set the wire flags to return to client. */ 2232 clid->flags = new->cl_exchange_flags; 2233 } 2234 2235 static bool client_has_state(struct nfs4_client *clp) 2236 { 2237 /* 2238 * Note clp->cl_openowners check isn't quite right: there's no 2239 * need to count owners without stateid's. 2240 * 2241 * Also note we should probably be using this in 4.0 case too. 2242 */ 2243 return !list_empty(&clp->cl_openowners) 2244 || !list_empty(&clp->cl_delegations) 2245 || !list_empty(&clp->cl_sessions); 2246 } 2247 2248 __be32 2249 nfsd4_exchange_id(struct svc_rqst *rqstp, 2250 struct nfsd4_compound_state *cstate, 2251 struct nfsd4_exchange_id *exid) 2252 { 2253 struct nfs4_client *conf, *new; 2254 struct nfs4_client *unconf = NULL; 2255 __be32 status; 2256 char addr_str[INET6_ADDRSTRLEN]; 2257 nfs4_verifier verf = exid->verifier; 2258 struct sockaddr *sa = svc_addr(rqstp); 2259 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 2260 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2261 2262 rpc_ntop(sa, addr_str, sizeof(addr_str)); 2263 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 2264 "ip_addr=%s flags %x, spa_how %d\n", 2265 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 2266 addr_str, exid->flags, exid->spa_how); 2267 2268 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 2269 return nfserr_inval; 2270 2271 switch (exid->spa_how) { 2272 case SP4_MACH_CRED: 2273 if (!svc_rqst_integrity_protected(rqstp)) 2274 return nfserr_inval; 2275 case SP4_NONE: 2276 break; 2277 default: /* checked by xdr code */ 2278 WARN_ON_ONCE(1); 2279 case SP4_SSV: 2280 return nfserr_encr_alg_unsupp; 2281 } 2282 2283 new = create_client(exid->clname, rqstp, &verf); 2284 if (new == NULL) 2285 return nfserr_jukebox; 2286 2287 /* Cases below refer to rfc 5661 section 18.35.4: */ 2288 spin_lock(&nn->client_lock); 2289 conf = find_confirmed_client_by_name(&exid->clname, nn); 2290 if (conf) { 2291 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 2292 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 2293 2294 if (update) { 2295 if (!clp_used_exchangeid(conf)) { /* buggy client */ 2296 status = nfserr_inval; 2297 goto out; 2298 } 2299 if (!mach_creds_match(conf, rqstp)) { 2300 status = nfserr_wrong_cred; 2301 goto out; 2302 } 2303 if (!creds_match) { /* case 9 */ 2304 status = nfserr_perm; 2305 goto out; 2306 } 2307 if (!verfs_match) { /* case 8 */ 2308 status = nfserr_not_same; 2309 goto out; 2310 } 2311 /* case 6 */ 2312 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 2313 goto out_copy; 2314 } 2315 if (!creds_match) { /* case 3 */ 2316 if (client_has_state(conf)) { 2317 status = nfserr_clid_inuse; 2318 goto out; 2319 } 2320 goto out_new; 2321 } 2322 if (verfs_match) { /* case 2 */ 2323 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 2324 goto out_copy; 2325 } 2326 /* case 5, client reboot */ 2327 conf = NULL; 2328 goto out_new; 2329 } 2330 2331 if (update) { /* case 7 */ 2332 status = nfserr_noent; 2333 goto out; 2334 } 2335 2336 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 2337 if (unconf) /* case 4, possible retry or client restart */ 2338 unhash_client_locked(unconf); 2339 2340 /* case 1 (normal case) */ 2341 out_new: 2342 if (conf) { 2343 status = mark_client_expired_locked(conf); 2344 if (status) 2345 goto out; 2346 } 2347 new->cl_minorversion = cstate->minorversion; 2348 new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED); 2349 2350 gen_clid(new, nn); 2351 add_to_unconfirmed(new); 2352 swap(new, conf); 2353 out_copy: 2354 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 2355 exid->clientid.cl_id = conf->cl_clientid.cl_id; 2356 2357 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 2358 nfsd4_set_ex_flags(conf, exid); 2359 2360 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 2361 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 2362 status = nfs_ok; 2363 2364 out: 2365 spin_unlock(&nn->client_lock); 2366 if (new) 2367 expire_client(new); 2368 if (unconf) 2369 expire_client(unconf); 2370 return status; 2371 } 2372 2373 static __be32 2374 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 2375 { 2376 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 2377 slot_seqid); 2378 2379 /* The slot is in use, and no response has been sent. */ 2380 if (slot_inuse) { 2381 if (seqid == slot_seqid) 2382 return nfserr_jukebox; 2383 else 2384 return nfserr_seq_misordered; 2385 } 2386 /* Note unsigned 32-bit arithmetic handles wraparound: */ 2387 if (likely(seqid == slot_seqid + 1)) 2388 return nfs_ok; 2389 if (seqid == slot_seqid) 2390 return nfserr_replay_cache; 2391 return nfserr_seq_misordered; 2392 } 2393 2394 /* 2395 * Cache the create session result into the create session single DRC 2396 * slot cache by saving the xdr structure. sl_seqid has been set. 2397 * Do this for solo or embedded create session operations. 2398 */ 2399 static void 2400 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 2401 struct nfsd4_clid_slot *slot, __be32 nfserr) 2402 { 2403 slot->sl_status = nfserr; 2404 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 2405 } 2406 2407 static __be32 2408 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 2409 struct nfsd4_clid_slot *slot) 2410 { 2411 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 2412 return slot->sl_status; 2413 } 2414 2415 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 2416 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 2417 1 + /* MIN tag is length with zero, only length */ \ 2418 3 + /* version, opcount, opcode */ \ 2419 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 2420 /* seqid, slotID, slotID, cache */ \ 2421 4 ) * sizeof(__be32)) 2422 2423 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 2424 2 + /* verifier: AUTH_NULL, length 0 */\ 2425 1 + /* status */ \ 2426 1 + /* MIN tag is length with zero, only length */ \ 2427 3 + /* opcount, opcode, opstatus*/ \ 2428 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 2429 /* seqid, slotID, slotID, slotID, status */ \ 2430 5 ) * sizeof(__be32)) 2431 2432 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 2433 { 2434 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 2435 2436 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 2437 return nfserr_toosmall; 2438 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 2439 return nfserr_toosmall; 2440 ca->headerpadsz = 0; 2441 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 2442 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 2443 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 2444 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 2445 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 2446 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 2447 /* 2448 * Note decreasing slot size below client's request may make it 2449 * difficult for client to function correctly, whereas 2450 * decreasing the number of slots will (just?) affect 2451 * performance. When short on memory we therefore prefer to 2452 * decrease number of slots instead of their size. Clients that 2453 * request larger slots than they need will get poor results: 2454 */ 2455 ca->maxreqs = nfsd4_get_drc_mem(ca); 2456 if (!ca->maxreqs) 2457 return nfserr_jukebox; 2458 2459 return nfs_ok; 2460 } 2461 2462 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 2463 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32)) 2464 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 2465 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32)) 2466 2467 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 2468 { 2469 ca->headerpadsz = 0; 2470 2471 /* 2472 * These RPC_MAX_HEADER macros are overkill, especially since we 2473 * don't even do gss on the backchannel yet. But this is still 2474 * less than 1k. Tighten up this estimate in the unlikely event 2475 * it turns out to be a problem for some client: 2476 */ 2477 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 2478 return nfserr_toosmall; 2479 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 2480 return nfserr_toosmall; 2481 ca->maxresp_cached = 0; 2482 if (ca->maxops < 2) 2483 return nfserr_toosmall; 2484 2485 return nfs_ok; 2486 } 2487 2488 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 2489 { 2490 switch (cbs->flavor) { 2491 case RPC_AUTH_NULL: 2492 case RPC_AUTH_UNIX: 2493 return nfs_ok; 2494 default: 2495 /* 2496 * GSS case: the spec doesn't allow us to return this 2497 * error. But it also doesn't allow us not to support 2498 * GSS. 2499 * I'd rather this fail hard than return some error the 2500 * client might think it can already handle: 2501 */ 2502 return nfserr_encr_alg_unsupp; 2503 } 2504 } 2505 2506 __be32 2507 nfsd4_create_session(struct svc_rqst *rqstp, 2508 struct nfsd4_compound_state *cstate, 2509 struct nfsd4_create_session *cr_ses) 2510 { 2511 struct sockaddr *sa = svc_addr(rqstp); 2512 struct nfs4_client *conf, *unconf; 2513 struct nfs4_client *old = NULL; 2514 struct nfsd4_session *new; 2515 struct nfsd4_conn *conn; 2516 struct nfsd4_clid_slot *cs_slot = NULL; 2517 __be32 status = 0; 2518 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2519 2520 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 2521 return nfserr_inval; 2522 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 2523 if (status) 2524 return status; 2525 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 2526 if (status) 2527 return status; 2528 status = check_backchannel_attrs(&cr_ses->back_channel); 2529 if (status) 2530 goto out_release_drc_mem; 2531 status = nfserr_jukebox; 2532 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 2533 if (!new) 2534 goto out_release_drc_mem; 2535 conn = alloc_conn_from_crses(rqstp, cr_ses); 2536 if (!conn) 2537 goto out_free_session; 2538 2539 spin_lock(&nn->client_lock); 2540 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 2541 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 2542 WARN_ON_ONCE(conf && unconf); 2543 2544 if (conf) { 2545 status = nfserr_wrong_cred; 2546 if (!mach_creds_match(conf, rqstp)) 2547 goto out_free_conn; 2548 cs_slot = &conf->cl_cs_slot; 2549 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 2550 if (status == nfserr_replay_cache) { 2551 status = nfsd4_replay_create_session(cr_ses, cs_slot); 2552 goto out_free_conn; 2553 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) { 2554 status = nfserr_seq_misordered; 2555 goto out_free_conn; 2556 } 2557 } else if (unconf) { 2558 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 2559 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 2560 status = nfserr_clid_inuse; 2561 goto out_free_conn; 2562 } 2563 status = nfserr_wrong_cred; 2564 if (!mach_creds_match(unconf, rqstp)) 2565 goto out_free_conn; 2566 cs_slot = &unconf->cl_cs_slot; 2567 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 2568 if (status) { 2569 /* an unconfirmed replay returns misordered */ 2570 status = nfserr_seq_misordered; 2571 goto out_free_conn; 2572 } 2573 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 2574 if (old) { 2575 status = mark_client_expired_locked(old); 2576 if (status) { 2577 old = NULL; 2578 goto out_free_conn; 2579 } 2580 } 2581 move_to_confirmed(unconf); 2582 conf = unconf; 2583 } else { 2584 status = nfserr_stale_clientid; 2585 goto out_free_conn; 2586 } 2587 status = nfs_ok; 2588 /* 2589 * We do not support RDMA or persistent sessions 2590 */ 2591 cr_ses->flags &= ~SESSION4_PERSIST; 2592 cr_ses->flags &= ~SESSION4_RDMA; 2593 2594 init_session(rqstp, new, conf, cr_ses); 2595 nfsd4_get_session_locked(new); 2596 2597 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 2598 NFS4_MAX_SESSIONID_LEN); 2599 cs_slot->sl_seqid++; 2600 cr_ses->seqid = cs_slot->sl_seqid; 2601 2602 /* cache solo and embedded create sessions under the client_lock */ 2603 nfsd4_cache_create_session(cr_ses, cs_slot, status); 2604 spin_unlock(&nn->client_lock); 2605 /* init connection and backchannel */ 2606 nfsd4_init_conn(rqstp, conn, new); 2607 nfsd4_put_session(new); 2608 if (old) 2609 expire_client(old); 2610 return status; 2611 out_free_conn: 2612 spin_unlock(&nn->client_lock); 2613 free_conn(conn); 2614 if (old) 2615 expire_client(old); 2616 out_free_session: 2617 __free_session(new); 2618 out_release_drc_mem: 2619 nfsd4_put_drc_mem(&cr_ses->fore_channel); 2620 return status; 2621 } 2622 2623 static __be32 nfsd4_map_bcts_dir(u32 *dir) 2624 { 2625 switch (*dir) { 2626 case NFS4_CDFC4_FORE: 2627 case NFS4_CDFC4_BACK: 2628 return nfs_ok; 2629 case NFS4_CDFC4_FORE_OR_BOTH: 2630 case NFS4_CDFC4_BACK_OR_BOTH: 2631 *dir = NFS4_CDFC4_BOTH; 2632 return nfs_ok; 2633 }; 2634 return nfserr_inval; 2635 } 2636 2637 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc) 2638 { 2639 struct nfsd4_session *session = cstate->session; 2640 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2641 __be32 status; 2642 2643 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 2644 if (status) 2645 return status; 2646 spin_lock(&nn->client_lock); 2647 session->se_cb_prog = bc->bc_cb_program; 2648 session->se_cb_sec = bc->bc_cb_sec; 2649 spin_unlock(&nn->client_lock); 2650 2651 nfsd4_probe_callback(session->se_client); 2652 2653 return nfs_ok; 2654 } 2655 2656 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 2657 struct nfsd4_compound_state *cstate, 2658 struct nfsd4_bind_conn_to_session *bcts) 2659 { 2660 __be32 status; 2661 struct nfsd4_conn *conn; 2662 struct nfsd4_session *session; 2663 struct net *net = SVC_NET(rqstp); 2664 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2665 2666 if (!nfsd4_last_compound_op(rqstp)) 2667 return nfserr_not_only_op; 2668 spin_lock(&nn->client_lock); 2669 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 2670 spin_unlock(&nn->client_lock); 2671 if (!session) 2672 goto out_no_session; 2673 status = nfserr_wrong_cred; 2674 if (!mach_creds_match(session->se_client, rqstp)) 2675 goto out; 2676 status = nfsd4_map_bcts_dir(&bcts->dir); 2677 if (status) 2678 goto out; 2679 conn = alloc_conn(rqstp, bcts->dir); 2680 status = nfserr_jukebox; 2681 if (!conn) 2682 goto out; 2683 nfsd4_init_conn(rqstp, conn, session); 2684 status = nfs_ok; 2685 out: 2686 nfsd4_put_session(session); 2687 out_no_session: 2688 return status; 2689 } 2690 2691 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) 2692 { 2693 if (!session) 2694 return 0; 2695 return !memcmp(sid, &session->se_sessionid, sizeof(*sid)); 2696 } 2697 2698 __be32 2699 nfsd4_destroy_session(struct svc_rqst *r, 2700 struct nfsd4_compound_state *cstate, 2701 struct nfsd4_destroy_session *sessionid) 2702 { 2703 struct nfsd4_session *ses; 2704 __be32 status; 2705 int ref_held_by_me = 0; 2706 struct net *net = SVC_NET(r); 2707 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2708 2709 status = nfserr_not_only_op; 2710 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { 2711 if (!nfsd4_last_compound_op(r)) 2712 goto out; 2713 ref_held_by_me++; 2714 } 2715 dump_sessionid(__func__, &sessionid->sessionid); 2716 spin_lock(&nn->client_lock); 2717 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status); 2718 if (!ses) 2719 goto out_client_lock; 2720 status = nfserr_wrong_cred; 2721 if (!mach_creds_match(ses->se_client, r)) 2722 goto out_put_session; 2723 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 2724 if (status) 2725 goto out_put_session; 2726 unhash_session(ses); 2727 spin_unlock(&nn->client_lock); 2728 2729 nfsd4_probe_callback_sync(ses->se_client); 2730 2731 spin_lock(&nn->client_lock); 2732 status = nfs_ok; 2733 out_put_session: 2734 nfsd4_put_session_locked(ses); 2735 out_client_lock: 2736 spin_unlock(&nn->client_lock); 2737 out: 2738 return status; 2739 } 2740 2741 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 2742 { 2743 struct nfsd4_conn *c; 2744 2745 list_for_each_entry(c, &s->se_conns, cn_persession) { 2746 if (c->cn_xprt == xpt) { 2747 return c; 2748 } 2749 } 2750 return NULL; 2751 } 2752 2753 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 2754 { 2755 struct nfs4_client *clp = ses->se_client; 2756 struct nfsd4_conn *c; 2757 __be32 status = nfs_ok; 2758 int ret; 2759 2760 spin_lock(&clp->cl_lock); 2761 c = __nfsd4_find_conn(new->cn_xprt, ses); 2762 if (c) 2763 goto out_free; 2764 status = nfserr_conn_not_bound_to_session; 2765 if (clp->cl_mach_cred) 2766 goto out_free; 2767 __nfsd4_hash_conn(new, ses); 2768 spin_unlock(&clp->cl_lock); 2769 ret = nfsd4_register_conn(new); 2770 if (ret) 2771 /* oops; xprt is already down: */ 2772 nfsd4_conn_lost(&new->cn_xpt_user); 2773 return nfs_ok; 2774 out_free: 2775 spin_unlock(&clp->cl_lock); 2776 free_conn(new); 2777 return status; 2778 } 2779 2780 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 2781 { 2782 struct nfsd4_compoundargs *args = rqstp->rq_argp; 2783 2784 return args->opcnt > session->se_fchannel.maxops; 2785 } 2786 2787 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 2788 struct nfsd4_session *session) 2789 { 2790 struct xdr_buf *xb = &rqstp->rq_arg; 2791 2792 return xb->len > session->se_fchannel.maxreq_sz; 2793 } 2794 2795 __be32 2796 nfsd4_sequence(struct svc_rqst *rqstp, 2797 struct nfsd4_compound_state *cstate, 2798 struct nfsd4_sequence *seq) 2799 { 2800 struct nfsd4_compoundres *resp = rqstp->rq_resp; 2801 struct xdr_stream *xdr = &resp->xdr; 2802 struct nfsd4_session *session; 2803 struct nfs4_client *clp; 2804 struct nfsd4_slot *slot; 2805 struct nfsd4_conn *conn; 2806 __be32 status; 2807 int buflen; 2808 struct net *net = SVC_NET(rqstp); 2809 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2810 2811 if (resp->opcnt != 1) 2812 return nfserr_sequence_pos; 2813 2814 /* 2815 * Will be either used or freed by nfsd4_sequence_check_conn 2816 * below. 2817 */ 2818 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 2819 if (!conn) 2820 return nfserr_jukebox; 2821 2822 spin_lock(&nn->client_lock); 2823 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 2824 if (!session) 2825 goto out_no_session; 2826 clp = session->se_client; 2827 2828 status = nfserr_too_many_ops; 2829 if (nfsd4_session_too_many_ops(rqstp, session)) 2830 goto out_put_session; 2831 2832 status = nfserr_req_too_big; 2833 if (nfsd4_request_too_big(rqstp, session)) 2834 goto out_put_session; 2835 2836 status = nfserr_badslot; 2837 if (seq->slotid >= session->se_fchannel.maxreqs) 2838 goto out_put_session; 2839 2840 slot = session->se_slots[seq->slotid]; 2841 dprintk("%s: slotid %d\n", __func__, seq->slotid); 2842 2843 /* We do not negotiate the number of slots yet, so set the 2844 * maxslots to the session maxreqs which is used to encode 2845 * sr_highest_slotid and the sr_target_slot id to maxslots */ 2846 seq->maxslots = session->se_fchannel.maxreqs; 2847 2848 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 2849 slot->sl_flags & NFSD4_SLOT_INUSE); 2850 if (status == nfserr_replay_cache) { 2851 status = nfserr_seq_misordered; 2852 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 2853 goto out_put_session; 2854 cstate->slot = slot; 2855 cstate->session = session; 2856 cstate->clp = clp; 2857 /* Return the cached reply status and set cstate->status 2858 * for nfsd4_proc_compound processing */ 2859 status = nfsd4_replay_cache_entry(resp, seq); 2860 cstate->status = nfserr_replay_cache; 2861 goto out; 2862 } 2863 if (status) 2864 goto out_put_session; 2865 2866 status = nfsd4_sequence_check_conn(conn, session); 2867 conn = NULL; 2868 if (status) 2869 goto out_put_session; 2870 2871 buflen = (seq->cachethis) ? 2872 session->se_fchannel.maxresp_cached : 2873 session->se_fchannel.maxresp_sz; 2874 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 2875 nfserr_rep_too_big; 2876 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 2877 goto out_put_session; 2878 svc_reserve(rqstp, buflen); 2879 2880 status = nfs_ok; 2881 /* Success! bump slot seqid */ 2882 slot->sl_seqid = seq->seqid; 2883 slot->sl_flags |= NFSD4_SLOT_INUSE; 2884 if (seq->cachethis) 2885 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 2886 else 2887 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 2888 2889 cstate->slot = slot; 2890 cstate->session = session; 2891 cstate->clp = clp; 2892 2893 out: 2894 switch (clp->cl_cb_state) { 2895 case NFSD4_CB_DOWN: 2896 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 2897 break; 2898 case NFSD4_CB_FAULT: 2899 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 2900 break; 2901 default: 2902 seq->status_flags = 0; 2903 } 2904 if (!list_empty(&clp->cl_revoked)) 2905 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 2906 out_no_session: 2907 if (conn) 2908 free_conn(conn); 2909 spin_unlock(&nn->client_lock); 2910 return status; 2911 out_put_session: 2912 nfsd4_put_session_locked(session); 2913 goto out_no_session; 2914 } 2915 2916 void 2917 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 2918 { 2919 struct nfsd4_compound_state *cs = &resp->cstate; 2920 2921 if (nfsd4_has_session(cs)) { 2922 if (cs->status != nfserr_replay_cache) { 2923 nfsd4_store_cache_entry(resp); 2924 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 2925 } 2926 /* Drop session reference that was taken in nfsd4_sequence() */ 2927 nfsd4_put_session(cs->session); 2928 } else if (cs->clp) 2929 put_client_renew(cs->clp); 2930 } 2931 2932 __be32 2933 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) 2934 { 2935 struct nfs4_client *conf, *unconf; 2936 struct nfs4_client *clp = NULL; 2937 __be32 status = 0; 2938 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2939 2940 spin_lock(&nn->client_lock); 2941 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 2942 conf = find_confirmed_client(&dc->clientid, true, nn); 2943 WARN_ON_ONCE(conf && unconf); 2944 2945 if (conf) { 2946 if (client_has_state(conf)) { 2947 status = nfserr_clientid_busy; 2948 goto out; 2949 } 2950 status = mark_client_expired_locked(conf); 2951 if (status) 2952 goto out; 2953 clp = conf; 2954 } else if (unconf) 2955 clp = unconf; 2956 else { 2957 status = nfserr_stale_clientid; 2958 goto out; 2959 } 2960 if (!mach_creds_match(clp, rqstp)) { 2961 clp = NULL; 2962 status = nfserr_wrong_cred; 2963 goto out; 2964 } 2965 unhash_client_locked(clp); 2966 out: 2967 spin_unlock(&nn->client_lock); 2968 if (clp) 2969 expire_client(clp); 2970 return status; 2971 } 2972 2973 __be32 2974 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) 2975 { 2976 __be32 status = 0; 2977 2978 if (rc->rca_one_fs) { 2979 if (!cstate->current_fh.fh_dentry) 2980 return nfserr_nofilehandle; 2981 /* 2982 * We don't take advantage of the rca_one_fs case. 2983 * That's OK, it's optional, we can safely ignore it. 2984 */ 2985 return nfs_ok; 2986 } 2987 2988 status = nfserr_complete_already; 2989 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, 2990 &cstate->session->se_client->cl_flags)) 2991 goto out; 2992 2993 status = nfserr_stale_clientid; 2994 if (is_client_expired(cstate->session->se_client)) 2995 /* 2996 * The following error isn't really legal. 2997 * But we only get here if the client just explicitly 2998 * destroyed the client. Surely it no longer cares what 2999 * error it gets back on an operation for the dead 3000 * client. 3001 */ 3002 goto out; 3003 3004 status = nfs_ok; 3005 nfsd4_client_record_create(cstate->session->se_client); 3006 out: 3007 return status; 3008 } 3009 3010 __be32 3011 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3012 struct nfsd4_setclientid *setclid) 3013 { 3014 struct xdr_netobj clname = setclid->se_name; 3015 nfs4_verifier clverifier = setclid->se_verf; 3016 struct nfs4_client *conf, *new; 3017 struct nfs4_client *unconf = NULL; 3018 __be32 status; 3019 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3020 3021 new = create_client(clname, rqstp, &clverifier); 3022 if (new == NULL) 3023 return nfserr_jukebox; 3024 /* Cases below refer to rfc 3530 section 14.2.33: */ 3025 spin_lock(&nn->client_lock); 3026 conf = find_confirmed_client_by_name(&clname, nn); 3027 if (conf) { 3028 /* case 0: */ 3029 status = nfserr_clid_inuse; 3030 if (clp_used_exchangeid(conf)) 3031 goto out; 3032 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 3033 char addr_str[INET6_ADDRSTRLEN]; 3034 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, 3035 sizeof(addr_str)); 3036 dprintk("NFSD: setclientid: string in use by client " 3037 "at %s\n", addr_str); 3038 goto out; 3039 } 3040 } 3041 unconf = find_unconfirmed_client_by_name(&clname, nn); 3042 if (unconf) 3043 unhash_client_locked(unconf); 3044 if (conf && same_verf(&conf->cl_verifier, &clverifier)) 3045 /* case 1: probable callback update */ 3046 copy_clid(new, conf); 3047 else /* case 4 (new client) or cases 2, 3 (client reboot): */ 3048 gen_clid(new, nn); 3049 new->cl_minorversion = 0; 3050 gen_callback(new, setclid, rqstp); 3051 add_to_unconfirmed(new); 3052 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 3053 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 3054 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 3055 new = NULL; 3056 status = nfs_ok; 3057 out: 3058 spin_unlock(&nn->client_lock); 3059 if (new) 3060 free_client(new); 3061 if (unconf) 3062 expire_client(unconf); 3063 return status; 3064 } 3065 3066 3067 __be32 3068 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 3069 struct nfsd4_compound_state *cstate, 3070 struct nfsd4_setclientid_confirm *setclientid_confirm) 3071 { 3072 struct nfs4_client *conf, *unconf; 3073 struct nfs4_client *old = NULL; 3074 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 3075 clientid_t * clid = &setclientid_confirm->sc_clientid; 3076 __be32 status; 3077 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3078 3079 if (STALE_CLIENTID(clid, nn)) 3080 return nfserr_stale_clientid; 3081 3082 spin_lock(&nn->client_lock); 3083 conf = find_confirmed_client(clid, false, nn); 3084 unconf = find_unconfirmed_client(clid, false, nn); 3085 /* 3086 * We try hard to give out unique clientid's, so if we get an 3087 * attempt to confirm the same clientid with a different cred, 3088 * there's a bug somewhere. Let's charitably assume it's our 3089 * bug. 3090 */ 3091 status = nfserr_serverfault; 3092 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) 3093 goto out; 3094 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) 3095 goto out; 3096 /* cases below refer to rfc 3530 section 14.2.34: */ 3097 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 3098 if (conf && !unconf) /* case 2: probable retransmit */ 3099 status = nfs_ok; 3100 else /* case 4: client hasn't noticed we rebooted yet? */ 3101 status = nfserr_stale_clientid; 3102 goto out; 3103 } 3104 status = nfs_ok; 3105 if (conf) { /* case 1: callback update */ 3106 old = unconf; 3107 unhash_client_locked(old); 3108 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 3109 } else { /* case 3: normal case; new or rebooted client */ 3110 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 3111 if (old) { 3112 status = mark_client_expired_locked(old); 3113 if (status) { 3114 old = NULL; 3115 goto out; 3116 } 3117 } 3118 move_to_confirmed(unconf); 3119 conf = unconf; 3120 } 3121 get_client_locked(conf); 3122 spin_unlock(&nn->client_lock); 3123 nfsd4_probe_callback(conf); 3124 spin_lock(&nn->client_lock); 3125 put_client_renew_locked(conf); 3126 out: 3127 spin_unlock(&nn->client_lock); 3128 if (old) 3129 expire_client(old); 3130 return status; 3131 } 3132 3133 static struct nfs4_file *nfsd4_alloc_file(void) 3134 { 3135 return kmem_cache_alloc(file_slab, GFP_KERNEL); 3136 } 3137 3138 /* OPEN Share state helper functions */ 3139 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, 3140 struct nfs4_file *fp) 3141 { 3142 lockdep_assert_held(&state_lock); 3143 3144 atomic_set(&fp->fi_ref, 1); 3145 spin_lock_init(&fp->fi_lock); 3146 INIT_LIST_HEAD(&fp->fi_stateids); 3147 INIT_LIST_HEAD(&fp->fi_delegations); 3148 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 3149 fh_copy_shallow(&fp->fi_fhandle, fh); 3150 fp->fi_deleg_file = NULL; 3151 fp->fi_had_conflict = false; 3152 fp->fi_share_deny = 0; 3153 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 3154 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 3155 #ifdef CONFIG_NFSD_PNFS 3156 INIT_LIST_HEAD(&fp->fi_lo_states); 3157 atomic_set(&fp->fi_lo_recalls, 0); 3158 #endif 3159 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); 3160 } 3161 3162 void 3163 nfsd4_free_slabs(void) 3164 { 3165 kmem_cache_destroy(odstate_slab); 3166 kmem_cache_destroy(openowner_slab); 3167 kmem_cache_destroy(lockowner_slab); 3168 kmem_cache_destroy(file_slab); 3169 kmem_cache_destroy(stateid_slab); 3170 kmem_cache_destroy(deleg_slab); 3171 } 3172 3173 int 3174 nfsd4_init_slabs(void) 3175 { 3176 openowner_slab = kmem_cache_create("nfsd4_openowners", 3177 sizeof(struct nfs4_openowner), 0, 0, NULL); 3178 if (openowner_slab == NULL) 3179 goto out; 3180 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 3181 sizeof(struct nfs4_lockowner), 0, 0, NULL); 3182 if (lockowner_slab == NULL) 3183 goto out_free_openowner_slab; 3184 file_slab = kmem_cache_create("nfsd4_files", 3185 sizeof(struct nfs4_file), 0, 0, NULL); 3186 if (file_slab == NULL) 3187 goto out_free_lockowner_slab; 3188 stateid_slab = kmem_cache_create("nfsd4_stateids", 3189 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 3190 if (stateid_slab == NULL) 3191 goto out_free_file_slab; 3192 deleg_slab = kmem_cache_create("nfsd4_delegations", 3193 sizeof(struct nfs4_delegation), 0, 0, NULL); 3194 if (deleg_slab == NULL) 3195 goto out_free_stateid_slab; 3196 odstate_slab = kmem_cache_create("nfsd4_odstate", 3197 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); 3198 if (odstate_slab == NULL) 3199 goto out_free_deleg_slab; 3200 return 0; 3201 3202 out_free_deleg_slab: 3203 kmem_cache_destroy(deleg_slab); 3204 out_free_stateid_slab: 3205 kmem_cache_destroy(stateid_slab); 3206 out_free_file_slab: 3207 kmem_cache_destroy(file_slab); 3208 out_free_lockowner_slab: 3209 kmem_cache_destroy(lockowner_slab); 3210 out_free_openowner_slab: 3211 kmem_cache_destroy(openowner_slab); 3212 out: 3213 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 3214 return -ENOMEM; 3215 } 3216 3217 static void init_nfs4_replay(struct nfs4_replay *rp) 3218 { 3219 rp->rp_status = nfserr_serverfault; 3220 rp->rp_buflen = 0; 3221 rp->rp_buf = rp->rp_ibuf; 3222 mutex_init(&rp->rp_mutex); 3223 } 3224 3225 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 3226 struct nfs4_stateowner *so) 3227 { 3228 if (!nfsd4_has_session(cstate)) { 3229 mutex_lock(&so->so_replay.rp_mutex); 3230 cstate->replay_owner = nfs4_get_stateowner(so); 3231 } 3232 } 3233 3234 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 3235 { 3236 struct nfs4_stateowner *so = cstate->replay_owner; 3237 3238 if (so != NULL) { 3239 cstate->replay_owner = NULL; 3240 mutex_unlock(&so->so_replay.rp_mutex); 3241 nfs4_put_stateowner(so); 3242 } 3243 } 3244 3245 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 3246 { 3247 struct nfs4_stateowner *sop; 3248 3249 sop = kmem_cache_alloc(slab, GFP_KERNEL); 3250 if (!sop) 3251 return NULL; 3252 3253 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); 3254 if (!sop->so_owner.data) { 3255 kmem_cache_free(slab, sop); 3256 return NULL; 3257 } 3258 sop->so_owner.len = owner->len; 3259 3260 INIT_LIST_HEAD(&sop->so_stateids); 3261 sop->so_client = clp; 3262 init_nfs4_replay(&sop->so_replay); 3263 atomic_set(&sop->so_count, 1); 3264 return sop; 3265 } 3266 3267 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 3268 { 3269 lockdep_assert_held(&clp->cl_lock); 3270 3271 list_add(&oo->oo_owner.so_strhash, 3272 &clp->cl_ownerstr_hashtbl[strhashval]); 3273 list_add(&oo->oo_perclient, &clp->cl_openowners); 3274 } 3275 3276 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 3277 { 3278 unhash_openowner_locked(openowner(so)); 3279 } 3280 3281 static void nfs4_free_openowner(struct nfs4_stateowner *so) 3282 { 3283 struct nfs4_openowner *oo = openowner(so); 3284 3285 kmem_cache_free(openowner_slab, oo); 3286 } 3287 3288 static const struct nfs4_stateowner_operations openowner_ops = { 3289 .so_unhash = nfs4_unhash_openowner, 3290 .so_free = nfs4_free_openowner, 3291 }; 3292 3293 static struct nfs4_openowner * 3294 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 3295 struct nfsd4_compound_state *cstate) 3296 { 3297 struct nfs4_client *clp = cstate->clp; 3298 struct nfs4_openowner *oo, *ret; 3299 3300 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 3301 if (!oo) 3302 return NULL; 3303 oo->oo_owner.so_ops = &openowner_ops; 3304 oo->oo_owner.so_is_open_owner = 1; 3305 oo->oo_owner.so_seqid = open->op_seqid; 3306 oo->oo_flags = 0; 3307 if (nfsd4_has_session(cstate)) 3308 oo->oo_flags |= NFS4_OO_CONFIRMED; 3309 oo->oo_time = 0; 3310 oo->oo_last_closed_stid = NULL; 3311 INIT_LIST_HEAD(&oo->oo_close_lru); 3312 spin_lock(&clp->cl_lock); 3313 ret = find_openstateowner_str_locked(strhashval, open, clp); 3314 if (ret == NULL) { 3315 hash_openowner(oo, clp, strhashval); 3316 ret = oo; 3317 } else 3318 nfs4_free_openowner(&oo->oo_owner); 3319 spin_unlock(&clp->cl_lock); 3320 return ret; 3321 } 3322 3323 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 3324 struct nfs4_openowner *oo = open->op_openowner; 3325 3326 atomic_inc(&stp->st_stid.sc_count); 3327 stp->st_stid.sc_type = NFS4_OPEN_STID; 3328 INIT_LIST_HEAD(&stp->st_locks); 3329 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 3330 get_nfs4_file(fp); 3331 stp->st_stid.sc_file = fp; 3332 stp->st_access_bmap = 0; 3333 stp->st_deny_bmap = 0; 3334 stp->st_openstp = NULL; 3335 spin_lock(&oo->oo_owner.so_client->cl_lock); 3336 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 3337 spin_lock(&fp->fi_lock); 3338 list_add(&stp->st_perfile, &fp->fi_stateids); 3339 spin_unlock(&fp->fi_lock); 3340 spin_unlock(&oo->oo_owner.so_client->cl_lock); 3341 } 3342 3343 /* 3344 * In the 4.0 case we need to keep the owners around a little while to handle 3345 * CLOSE replay. We still do need to release any file access that is held by 3346 * them before returning however. 3347 */ 3348 static void 3349 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 3350 { 3351 struct nfs4_ol_stateid *last; 3352 struct nfs4_openowner *oo = openowner(s->st_stateowner); 3353 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 3354 nfsd_net_id); 3355 3356 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 3357 3358 /* 3359 * We know that we hold one reference via nfsd4_close, and another 3360 * "persistent" reference for the client. If the refcount is higher 3361 * than 2, then there are still calls in progress that are using this 3362 * stateid. We can't put the sc_file reference until they are finished. 3363 * Wait for the refcount to drop to 2. Since it has been unhashed, 3364 * there should be no danger of the refcount going back up again at 3365 * this point. 3366 */ 3367 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2); 3368 3369 release_all_access(s); 3370 if (s->st_stid.sc_file) { 3371 put_nfs4_file(s->st_stid.sc_file); 3372 s->st_stid.sc_file = NULL; 3373 } 3374 3375 spin_lock(&nn->client_lock); 3376 last = oo->oo_last_closed_stid; 3377 oo->oo_last_closed_stid = s; 3378 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 3379 oo->oo_time = get_seconds(); 3380 spin_unlock(&nn->client_lock); 3381 if (last) 3382 nfs4_put_stid(&last->st_stid); 3383 } 3384 3385 /* search file_hashtbl[] for file */ 3386 static struct nfs4_file * 3387 find_file_locked(struct knfsd_fh *fh, unsigned int hashval) 3388 { 3389 struct nfs4_file *fp; 3390 3391 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) { 3392 if (fh_match(&fp->fi_fhandle, fh)) { 3393 if (atomic_inc_not_zero(&fp->fi_ref)) 3394 return fp; 3395 } 3396 } 3397 return NULL; 3398 } 3399 3400 struct nfs4_file * 3401 find_file(struct knfsd_fh *fh) 3402 { 3403 struct nfs4_file *fp; 3404 unsigned int hashval = file_hashval(fh); 3405 3406 rcu_read_lock(); 3407 fp = find_file_locked(fh, hashval); 3408 rcu_read_unlock(); 3409 return fp; 3410 } 3411 3412 static struct nfs4_file * 3413 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) 3414 { 3415 struct nfs4_file *fp; 3416 unsigned int hashval = file_hashval(fh); 3417 3418 rcu_read_lock(); 3419 fp = find_file_locked(fh, hashval); 3420 rcu_read_unlock(); 3421 if (fp) 3422 return fp; 3423 3424 spin_lock(&state_lock); 3425 fp = find_file_locked(fh, hashval); 3426 if (likely(fp == NULL)) { 3427 nfsd4_init_file(fh, hashval, new); 3428 fp = new; 3429 } 3430 spin_unlock(&state_lock); 3431 3432 return fp; 3433 } 3434 3435 /* 3436 * Called to check deny when READ with all zero stateid or 3437 * WRITE with all zero or all one stateid 3438 */ 3439 static __be32 3440 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 3441 { 3442 struct nfs4_file *fp; 3443 __be32 ret = nfs_ok; 3444 3445 fp = find_file(¤t_fh->fh_handle); 3446 if (!fp) 3447 return ret; 3448 /* Check for conflicting share reservations */ 3449 spin_lock(&fp->fi_lock); 3450 if (fp->fi_share_deny & deny_type) 3451 ret = nfserr_locked; 3452 spin_unlock(&fp->fi_lock); 3453 put_nfs4_file(fp); 3454 return ret; 3455 } 3456 3457 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 3458 { 3459 struct nfs4_delegation *dp = cb_to_delegation(cb); 3460 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 3461 nfsd_net_id); 3462 3463 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 3464 3465 /* 3466 * We can't do this in nfsd_break_deleg_cb because it is 3467 * already holding inode->i_lock. 3468 * 3469 * If the dl_time != 0, then we know that it has already been 3470 * queued for a lease break. Don't queue it again. 3471 */ 3472 spin_lock(&state_lock); 3473 if (dp->dl_time == 0) { 3474 dp->dl_time = get_seconds(); 3475 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 3476 } 3477 spin_unlock(&state_lock); 3478 } 3479 3480 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 3481 struct rpc_task *task) 3482 { 3483 struct nfs4_delegation *dp = cb_to_delegation(cb); 3484 3485 switch (task->tk_status) { 3486 case 0: 3487 return 1; 3488 case -EBADHANDLE: 3489 case -NFS4ERR_BAD_STATEID: 3490 /* 3491 * Race: client probably got cb_recall before open reply 3492 * granting delegation. 3493 */ 3494 if (dp->dl_retries--) { 3495 rpc_delay(task, 2 * HZ); 3496 return 0; 3497 } 3498 /*FALLTHRU*/ 3499 default: 3500 return -1; 3501 } 3502 } 3503 3504 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 3505 { 3506 struct nfs4_delegation *dp = cb_to_delegation(cb); 3507 3508 nfs4_put_stid(&dp->dl_stid); 3509 } 3510 3511 static struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 3512 .prepare = nfsd4_cb_recall_prepare, 3513 .done = nfsd4_cb_recall_done, 3514 .release = nfsd4_cb_recall_release, 3515 }; 3516 3517 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 3518 { 3519 /* 3520 * We're assuming the state code never drops its reference 3521 * without first removing the lease. Since we're in this lease 3522 * callback (and since the lease code is serialized by the kernel 3523 * lock) we know the server hasn't removed the lease yet, we know 3524 * it's safe to take a reference. 3525 */ 3526 atomic_inc(&dp->dl_stid.sc_count); 3527 nfsd4_run_cb(&dp->dl_recall); 3528 } 3529 3530 /* Called from break_lease() with i_lock held. */ 3531 static bool 3532 nfsd_break_deleg_cb(struct file_lock *fl) 3533 { 3534 bool ret = false; 3535 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; 3536 struct nfs4_delegation *dp; 3537 3538 if (!fp) { 3539 WARN(1, "(%p)->fl_owner NULL\n", fl); 3540 return ret; 3541 } 3542 if (fp->fi_had_conflict) { 3543 WARN(1, "duplicate break on %p\n", fp); 3544 return ret; 3545 } 3546 /* 3547 * We don't want the locks code to timeout the lease for us; 3548 * we'll remove it ourself if a delegation isn't returned 3549 * in time: 3550 */ 3551 fl->fl_break_time = 0; 3552 3553 spin_lock(&fp->fi_lock); 3554 fp->fi_had_conflict = true; 3555 /* 3556 * If there are no delegations on the list, then return true 3557 * so that the lease code will go ahead and delete it. 3558 */ 3559 if (list_empty(&fp->fi_delegations)) 3560 ret = true; 3561 else 3562 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 3563 nfsd_break_one_deleg(dp); 3564 spin_unlock(&fp->fi_lock); 3565 return ret; 3566 } 3567 3568 static int 3569 nfsd_change_deleg_cb(struct file_lock *onlist, int arg, 3570 struct list_head *dispose) 3571 { 3572 if (arg & F_UNLCK) 3573 return lease_modify(onlist, arg, dispose); 3574 else 3575 return -EAGAIN; 3576 } 3577 3578 static const struct lock_manager_operations nfsd_lease_mng_ops = { 3579 .lm_break = nfsd_break_deleg_cb, 3580 .lm_change = nfsd_change_deleg_cb, 3581 }; 3582 3583 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 3584 { 3585 if (nfsd4_has_session(cstate)) 3586 return nfs_ok; 3587 if (seqid == so->so_seqid - 1) 3588 return nfserr_replay_me; 3589 if (seqid == so->so_seqid) 3590 return nfs_ok; 3591 return nfserr_bad_seqid; 3592 } 3593 3594 static __be32 lookup_clientid(clientid_t *clid, 3595 struct nfsd4_compound_state *cstate, 3596 struct nfsd_net *nn) 3597 { 3598 struct nfs4_client *found; 3599 3600 if (cstate->clp) { 3601 found = cstate->clp; 3602 if (!same_clid(&found->cl_clientid, clid)) 3603 return nfserr_stale_clientid; 3604 return nfs_ok; 3605 } 3606 3607 if (STALE_CLIENTID(clid, nn)) 3608 return nfserr_stale_clientid; 3609 3610 /* 3611 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one 3612 * cached already then we know this is for is for v4.0 and "sessions" 3613 * will be false. 3614 */ 3615 WARN_ON_ONCE(cstate->session); 3616 spin_lock(&nn->client_lock); 3617 found = find_confirmed_client(clid, false, nn); 3618 if (!found) { 3619 spin_unlock(&nn->client_lock); 3620 return nfserr_expired; 3621 } 3622 atomic_inc(&found->cl_refcount); 3623 spin_unlock(&nn->client_lock); 3624 3625 /* Cache the nfs4_client in cstate! */ 3626 cstate->clp = found; 3627 return nfs_ok; 3628 } 3629 3630 __be32 3631 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 3632 struct nfsd4_open *open, struct nfsd_net *nn) 3633 { 3634 clientid_t *clientid = &open->op_clientid; 3635 struct nfs4_client *clp = NULL; 3636 unsigned int strhashval; 3637 struct nfs4_openowner *oo = NULL; 3638 __be32 status; 3639 3640 if (STALE_CLIENTID(&open->op_clientid, nn)) 3641 return nfserr_stale_clientid; 3642 /* 3643 * In case we need it later, after we've already created the 3644 * file and don't want to risk a further failure: 3645 */ 3646 open->op_file = nfsd4_alloc_file(); 3647 if (open->op_file == NULL) 3648 return nfserr_jukebox; 3649 3650 status = lookup_clientid(clientid, cstate, nn); 3651 if (status) 3652 return status; 3653 clp = cstate->clp; 3654 3655 strhashval = ownerstr_hashval(&open->op_owner); 3656 oo = find_openstateowner_str(strhashval, open, clp); 3657 open->op_openowner = oo; 3658 if (!oo) { 3659 goto new_owner; 3660 } 3661 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 3662 /* Replace unconfirmed owners without checking for replay. */ 3663 release_openowner(oo); 3664 open->op_openowner = NULL; 3665 goto new_owner; 3666 } 3667 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 3668 if (status) 3669 return status; 3670 goto alloc_stateid; 3671 new_owner: 3672 oo = alloc_init_open_stateowner(strhashval, open, cstate); 3673 if (oo == NULL) 3674 return nfserr_jukebox; 3675 open->op_openowner = oo; 3676 alloc_stateid: 3677 open->op_stp = nfs4_alloc_open_stateid(clp); 3678 if (!open->op_stp) 3679 return nfserr_jukebox; 3680 3681 if (nfsd4_has_session(cstate) && 3682 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 3683 open->op_odstate = alloc_clnt_odstate(clp); 3684 if (!open->op_odstate) 3685 return nfserr_jukebox; 3686 } 3687 3688 return nfs_ok; 3689 } 3690 3691 static inline __be32 3692 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 3693 { 3694 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 3695 return nfserr_openmode; 3696 else 3697 return nfs_ok; 3698 } 3699 3700 static int share_access_to_flags(u32 share_access) 3701 { 3702 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 3703 } 3704 3705 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 3706 { 3707 struct nfs4_stid *ret; 3708 3709 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); 3710 if (!ret) 3711 return NULL; 3712 return delegstateid(ret); 3713 } 3714 3715 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 3716 { 3717 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 3718 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 3719 } 3720 3721 static __be32 3722 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 3723 struct nfs4_delegation **dp) 3724 { 3725 int flags; 3726 __be32 status = nfserr_bad_stateid; 3727 struct nfs4_delegation *deleg; 3728 3729 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 3730 if (deleg == NULL) 3731 goto out; 3732 flags = share_access_to_flags(open->op_share_access); 3733 status = nfs4_check_delegmode(deleg, flags); 3734 if (status) { 3735 nfs4_put_stid(&deleg->dl_stid); 3736 goto out; 3737 } 3738 *dp = deleg; 3739 out: 3740 if (!nfsd4_is_deleg_cur(open)) 3741 return nfs_ok; 3742 if (status) 3743 return status; 3744 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 3745 return nfs_ok; 3746 } 3747 3748 static struct nfs4_ol_stateid * 3749 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 3750 { 3751 struct nfs4_ol_stateid *local, *ret = NULL; 3752 struct nfs4_openowner *oo = open->op_openowner; 3753 3754 spin_lock(&fp->fi_lock); 3755 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 3756 /* ignore lock owners */ 3757 if (local->st_stateowner->so_is_open_owner == 0) 3758 continue; 3759 if (local->st_stateowner == &oo->oo_owner) { 3760 ret = local; 3761 atomic_inc(&ret->st_stid.sc_count); 3762 break; 3763 } 3764 } 3765 spin_unlock(&fp->fi_lock); 3766 return ret; 3767 } 3768 3769 static inline int nfs4_access_to_access(u32 nfs4_access) 3770 { 3771 int flags = 0; 3772 3773 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 3774 flags |= NFSD_MAY_READ; 3775 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 3776 flags |= NFSD_MAY_WRITE; 3777 return flags; 3778 } 3779 3780 static inline __be32 3781 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 3782 struct nfsd4_open *open) 3783 { 3784 struct iattr iattr = { 3785 .ia_valid = ATTR_SIZE, 3786 .ia_size = 0, 3787 }; 3788 if (!open->op_truncate) 3789 return 0; 3790 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 3791 return nfserr_inval; 3792 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 3793 } 3794 3795 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 3796 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 3797 struct nfsd4_open *open) 3798 { 3799 struct file *filp = NULL; 3800 __be32 status; 3801 int oflag = nfs4_access_to_omode(open->op_share_access); 3802 int access = nfs4_access_to_access(open->op_share_access); 3803 unsigned char old_access_bmap, old_deny_bmap; 3804 3805 spin_lock(&fp->fi_lock); 3806 3807 /* 3808 * Are we trying to set a deny mode that would conflict with 3809 * current access? 3810 */ 3811 status = nfs4_file_check_deny(fp, open->op_share_deny); 3812 if (status != nfs_ok) { 3813 spin_unlock(&fp->fi_lock); 3814 goto out; 3815 } 3816 3817 /* set access to the file */ 3818 status = nfs4_file_get_access(fp, open->op_share_access); 3819 if (status != nfs_ok) { 3820 spin_unlock(&fp->fi_lock); 3821 goto out; 3822 } 3823 3824 /* Set access bits in stateid */ 3825 old_access_bmap = stp->st_access_bmap; 3826 set_access(open->op_share_access, stp); 3827 3828 /* Set new deny mask */ 3829 old_deny_bmap = stp->st_deny_bmap; 3830 set_deny(open->op_share_deny, stp); 3831 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 3832 3833 if (!fp->fi_fds[oflag]) { 3834 spin_unlock(&fp->fi_lock); 3835 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp); 3836 if (status) 3837 goto out_put_access; 3838 spin_lock(&fp->fi_lock); 3839 if (!fp->fi_fds[oflag]) { 3840 fp->fi_fds[oflag] = filp; 3841 filp = NULL; 3842 } 3843 } 3844 spin_unlock(&fp->fi_lock); 3845 if (filp) 3846 fput(filp); 3847 3848 status = nfsd4_truncate(rqstp, cur_fh, open); 3849 if (status) 3850 goto out_put_access; 3851 out: 3852 return status; 3853 out_put_access: 3854 stp->st_access_bmap = old_access_bmap; 3855 nfs4_file_put_access(fp, open->op_share_access); 3856 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 3857 goto out; 3858 } 3859 3860 static __be32 3861 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 3862 { 3863 __be32 status; 3864 unsigned char old_deny_bmap; 3865 3866 if (!test_access(open->op_share_access, stp)) 3867 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open); 3868 3869 /* test and set deny mode */ 3870 spin_lock(&fp->fi_lock); 3871 status = nfs4_file_check_deny(fp, open->op_share_deny); 3872 if (status == nfs_ok) { 3873 old_deny_bmap = stp->st_deny_bmap; 3874 set_deny(open->op_share_deny, stp); 3875 fp->fi_share_deny |= 3876 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 3877 } 3878 spin_unlock(&fp->fi_lock); 3879 3880 if (status != nfs_ok) 3881 return status; 3882 3883 status = nfsd4_truncate(rqstp, cur_fh, open); 3884 if (status != nfs_ok) 3885 reset_union_bmap_deny(old_deny_bmap, stp); 3886 return status; 3887 } 3888 3889 static void 3890 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) 3891 { 3892 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 3893 } 3894 3895 /* Should we give out recallable state?: */ 3896 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 3897 { 3898 if (clp->cl_cb_state == NFSD4_CB_UP) 3899 return true; 3900 /* 3901 * In the sessions case, since we don't have to establish a 3902 * separate connection for callbacks, we assume it's OK 3903 * until we hear otherwise: 3904 */ 3905 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 3906 } 3907 3908 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag) 3909 { 3910 struct file_lock *fl; 3911 3912 fl = locks_alloc_lock(); 3913 if (!fl) 3914 return NULL; 3915 fl->fl_lmops = &nfsd_lease_mng_ops; 3916 fl->fl_flags = FL_DELEG; 3917 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 3918 fl->fl_end = OFFSET_MAX; 3919 fl->fl_owner = (fl_owner_t)fp; 3920 fl->fl_pid = current->tgid; 3921 return fl; 3922 } 3923 3924 static int nfs4_setlease(struct nfs4_delegation *dp) 3925 { 3926 struct nfs4_file *fp = dp->dl_stid.sc_file; 3927 struct file_lock *fl, *ret; 3928 struct file *filp; 3929 int status = 0; 3930 3931 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ); 3932 if (!fl) 3933 return -ENOMEM; 3934 filp = find_readable_file(fp); 3935 if (!filp) { 3936 /* We should always have a readable file here */ 3937 WARN_ON_ONCE(1); 3938 return -EBADF; 3939 } 3940 fl->fl_file = filp; 3941 ret = fl; 3942 status = vfs_setlease(filp, fl->fl_type, &fl, NULL); 3943 if (fl) 3944 locks_free_lock(fl); 3945 if (status) 3946 goto out_fput; 3947 spin_lock(&state_lock); 3948 spin_lock(&fp->fi_lock); 3949 /* Did the lease get broken before we took the lock? */ 3950 status = -EAGAIN; 3951 if (fp->fi_had_conflict) 3952 goto out_unlock; 3953 /* Race breaker */ 3954 if (fp->fi_deleg_file) { 3955 status = 0; 3956 ++fp->fi_delegees; 3957 hash_delegation_locked(dp, fp); 3958 goto out_unlock; 3959 } 3960 fp->fi_deleg_file = filp; 3961 fp->fi_delegees = 1; 3962 hash_delegation_locked(dp, fp); 3963 spin_unlock(&fp->fi_lock); 3964 spin_unlock(&state_lock); 3965 return 0; 3966 out_unlock: 3967 spin_unlock(&fp->fi_lock); 3968 spin_unlock(&state_lock); 3969 out_fput: 3970 fput(filp); 3971 return status; 3972 } 3973 3974 static struct nfs4_delegation * 3975 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, 3976 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) 3977 { 3978 int status; 3979 struct nfs4_delegation *dp; 3980 3981 if (fp->fi_had_conflict) 3982 return ERR_PTR(-EAGAIN); 3983 3984 dp = alloc_init_deleg(clp, fh, odstate); 3985 if (!dp) 3986 return ERR_PTR(-ENOMEM); 3987 3988 get_nfs4_file(fp); 3989 spin_lock(&state_lock); 3990 spin_lock(&fp->fi_lock); 3991 dp->dl_stid.sc_file = fp; 3992 if (!fp->fi_deleg_file) { 3993 spin_unlock(&fp->fi_lock); 3994 spin_unlock(&state_lock); 3995 status = nfs4_setlease(dp); 3996 goto out; 3997 } 3998 if (fp->fi_had_conflict) { 3999 status = -EAGAIN; 4000 goto out_unlock; 4001 } 4002 ++fp->fi_delegees; 4003 hash_delegation_locked(dp, fp); 4004 status = 0; 4005 out_unlock: 4006 spin_unlock(&fp->fi_lock); 4007 spin_unlock(&state_lock); 4008 out: 4009 if (status) { 4010 put_clnt_odstate(dp->dl_clnt_odstate); 4011 nfs4_put_stid(&dp->dl_stid); 4012 return ERR_PTR(status); 4013 } 4014 return dp; 4015 } 4016 4017 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 4018 { 4019 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4020 if (status == -EAGAIN) 4021 open->op_why_no_deleg = WND4_CONTENTION; 4022 else { 4023 open->op_why_no_deleg = WND4_RESOURCE; 4024 switch (open->op_deleg_want) { 4025 case NFS4_SHARE_WANT_READ_DELEG: 4026 case NFS4_SHARE_WANT_WRITE_DELEG: 4027 case NFS4_SHARE_WANT_ANY_DELEG: 4028 break; 4029 case NFS4_SHARE_WANT_CANCEL: 4030 open->op_why_no_deleg = WND4_CANCELLED; 4031 break; 4032 case NFS4_SHARE_WANT_NO_DELEG: 4033 WARN_ON_ONCE(1); 4034 } 4035 } 4036 } 4037 4038 /* 4039 * Attempt to hand out a delegation. 4040 * 4041 * Note we don't support write delegations, and won't until the vfs has 4042 * proper support for them. 4043 */ 4044 static void 4045 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, 4046 struct nfs4_ol_stateid *stp) 4047 { 4048 struct nfs4_delegation *dp; 4049 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 4050 struct nfs4_client *clp = stp->st_stid.sc_client; 4051 int cb_up; 4052 int status = 0; 4053 4054 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 4055 open->op_recall = 0; 4056 switch (open->op_claim_type) { 4057 case NFS4_OPEN_CLAIM_PREVIOUS: 4058 if (!cb_up) 4059 open->op_recall = 1; 4060 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ) 4061 goto out_no_deleg; 4062 break; 4063 case NFS4_OPEN_CLAIM_NULL: 4064 case NFS4_OPEN_CLAIM_FH: 4065 /* 4066 * Let's not give out any delegations till everyone's 4067 * had the chance to reclaim theirs.... 4068 */ 4069 if (locks_in_grace(clp->net)) 4070 goto out_no_deleg; 4071 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 4072 goto out_no_deleg; 4073 /* 4074 * Also, if the file was opened for write or 4075 * create, there's a good chance the client's 4076 * about to write to it, resulting in an 4077 * immediate recall (since we don't support 4078 * write delegations): 4079 */ 4080 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 4081 goto out_no_deleg; 4082 if (open->op_create == NFS4_OPEN_CREATE) 4083 goto out_no_deleg; 4084 break; 4085 default: 4086 goto out_no_deleg; 4087 } 4088 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); 4089 if (IS_ERR(dp)) 4090 goto out_no_deleg; 4091 4092 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 4093 4094 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", 4095 STATEID_VAL(&dp->dl_stid.sc_stateid)); 4096 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 4097 nfs4_put_stid(&dp->dl_stid); 4098 return; 4099 out_no_deleg: 4100 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 4101 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 4102 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { 4103 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 4104 open->op_recall = 1; 4105 } 4106 4107 /* 4.1 client asking for a delegation? */ 4108 if (open->op_deleg_want) 4109 nfsd4_open_deleg_none_ext(open, status); 4110 return; 4111 } 4112 4113 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 4114 struct nfs4_delegation *dp) 4115 { 4116 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 4117 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 4118 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4119 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 4120 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 4121 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 4122 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4123 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 4124 } 4125 /* Otherwise the client must be confused wanting a delegation 4126 * it already has, therefore we don't return 4127 * NFS4_OPEN_DELEGATE_NONE_EXT and reason. 4128 */ 4129 } 4130 4131 __be32 4132 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 4133 { 4134 struct nfsd4_compoundres *resp = rqstp->rq_resp; 4135 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 4136 struct nfs4_file *fp = NULL; 4137 struct nfs4_ol_stateid *stp = NULL; 4138 struct nfs4_delegation *dp = NULL; 4139 __be32 status; 4140 4141 /* 4142 * Lookup file; if found, lookup stateid and check open request, 4143 * and check for delegations in the process of being recalled. 4144 * If not found, create the nfs4_file struct 4145 */ 4146 fp = find_or_add_file(open->op_file, ¤t_fh->fh_handle); 4147 if (fp != open->op_file) { 4148 status = nfs4_check_deleg(cl, open, &dp); 4149 if (status) 4150 goto out; 4151 stp = nfsd4_find_existing_open(fp, open); 4152 } else { 4153 open->op_file = NULL; 4154 status = nfserr_bad_stateid; 4155 if (nfsd4_is_deleg_cur(open)) 4156 goto out; 4157 } 4158 4159 /* 4160 * OPEN the file, or upgrade an existing OPEN. 4161 * If truncate fails, the OPEN fails. 4162 */ 4163 if (stp) { 4164 /* Stateid was found, this is an OPEN upgrade */ 4165 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 4166 if (status) 4167 goto out; 4168 } else { 4169 stp = open->op_stp; 4170 open->op_stp = NULL; 4171 init_open_stateid(stp, fp, open); 4172 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); 4173 if (status) { 4174 release_open_stateid(stp); 4175 goto out; 4176 } 4177 4178 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 4179 open->op_odstate); 4180 if (stp->st_clnt_odstate == open->op_odstate) 4181 open->op_odstate = NULL; 4182 } 4183 update_stateid(&stp->st_stid.sc_stateid); 4184 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4185 4186 if (nfsd4_has_session(&resp->cstate)) { 4187 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 4188 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4189 open->op_why_no_deleg = WND4_NOT_WANTED; 4190 goto nodeleg; 4191 } 4192 } 4193 4194 /* 4195 * Attempt to hand out a delegation. No error return, because the 4196 * OPEN succeeds even if we fail. 4197 */ 4198 nfs4_open_delegation(current_fh, open, stp); 4199 nodeleg: 4200 status = nfs_ok; 4201 4202 dprintk("%s: stateid=" STATEID_FMT "\n", __func__, 4203 STATEID_VAL(&stp->st_stid.sc_stateid)); 4204 out: 4205 /* 4.1 client trying to upgrade/downgrade delegation? */ 4206 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 4207 open->op_deleg_want) 4208 nfsd4_deleg_xgrade_none_ext(open, dp); 4209 4210 if (fp) 4211 put_nfs4_file(fp); 4212 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 4213 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate)); 4214 /* 4215 * To finish the open response, we just need to set the rflags. 4216 */ 4217 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 4218 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && 4219 !nfsd4_has_session(&resp->cstate)) 4220 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 4221 if (dp) 4222 nfs4_put_stid(&dp->dl_stid); 4223 if (stp) 4224 nfs4_put_stid(&stp->st_stid); 4225 4226 return status; 4227 } 4228 4229 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 4230 struct nfsd4_open *open) 4231 { 4232 if (open->op_openowner) { 4233 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; 4234 4235 nfsd4_cstate_assign_replay(cstate, so); 4236 nfs4_put_stateowner(so); 4237 } 4238 if (open->op_file) 4239 kmem_cache_free(file_slab, open->op_file); 4240 if (open->op_stp) 4241 nfs4_put_stid(&open->op_stp->st_stid); 4242 if (open->op_odstate) 4243 kmem_cache_free(odstate_slab, open->op_odstate); 4244 } 4245 4246 __be32 4247 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4248 clientid_t *clid) 4249 { 4250 struct nfs4_client *clp; 4251 __be32 status; 4252 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4253 4254 dprintk("process_renew(%08x/%08x): starting\n", 4255 clid->cl_boot, clid->cl_id); 4256 status = lookup_clientid(clid, cstate, nn); 4257 if (status) 4258 goto out; 4259 clp = cstate->clp; 4260 status = nfserr_cb_path_down; 4261 if (!list_empty(&clp->cl_delegations) 4262 && clp->cl_cb_state != NFSD4_CB_UP) 4263 goto out; 4264 status = nfs_ok; 4265 out: 4266 return status; 4267 } 4268 4269 void 4270 nfsd4_end_grace(struct nfsd_net *nn) 4271 { 4272 /* do nothing if grace period already ended */ 4273 if (nn->grace_ended) 4274 return; 4275 4276 dprintk("NFSD: end of grace period\n"); 4277 nn->grace_ended = true; 4278 /* 4279 * If the server goes down again right now, an NFSv4 4280 * client will still be allowed to reclaim after it comes back up, 4281 * even if it hasn't yet had a chance to reclaim state this time. 4282 * 4283 */ 4284 nfsd4_record_grace_done(nn); 4285 /* 4286 * At this point, NFSv4 clients can still reclaim. But if the 4287 * server crashes, any that have not yet reclaimed will be out 4288 * of luck on the next boot. 4289 * 4290 * (NFSv4.1+ clients are considered to have reclaimed once they 4291 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to 4292 * have reclaimed after their first OPEN.) 4293 */ 4294 locks_end_grace(&nn->nfsd4_manager); 4295 /* 4296 * At this point, and once lockd and/or any other containers 4297 * exit their grace period, further reclaims will fail and 4298 * regular locking can resume. 4299 */ 4300 } 4301 4302 static time_t 4303 nfs4_laundromat(struct nfsd_net *nn) 4304 { 4305 struct nfs4_client *clp; 4306 struct nfs4_openowner *oo; 4307 struct nfs4_delegation *dp; 4308 struct nfs4_ol_stateid *stp; 4309 struct list_head *pos, *next, reaplist; 4310 time_t cutoff = get_seconds() - nn->nfsd4_lease; 4311 time_t t, new_timeo = nn->nfsd4_lease; 4312 4313 dprintk("NFSD: laundromat service - starting\n"); 4314 nfsd4_end_grace(nn); 4315 INIT_LIST_HEAD(&reaplist); 4316 spin_lock(&nn->client_lock); 4317 list_for_each_safe(pos, next, &nn->client_lru) { 4318 clp = list_entry(pos, struct nfs4_client, cl_lru); 4319 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 4320 t = clp->cl_time - cutoff; 4321 new_timeo = min(new_timeo, t); 4322 break; 4323 } 4324 if (mark_client_expired_locked(clp)) { 4325 dprintk("NFSD: client in use (clientid %08x)\n", 4326 clp->cl_clientid.cl_id); 4327 continue; 4328 } 4329 list_add(&clp->cl_lru, &reaplist); 4330 } 4331 spin_unlock(&nn->client_lock); 4332 list_for_each_safe(pos, next, &reaplist) { 4333 clp = list_entry(pos, struct nfs4_client, cl_lru); 4334 dprintk("NFSD: purging unused client (clientid %08x)\n", 4335 clp->cl_clientid.cl_id); 4336 list_del_init(&clp->cl_lru); 4337 expire_client(clp); 4338 } 4339 spin_lock(&state_lock); 4340 list_for_each_safe(pos, next, &nn->del_recall_lru) { 4341 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 4342 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn) 4343 continue; 4344 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 4345 t = dp->dl_time - cutoff; 4346 new_timeo = min(new_timeo, t); 4347 break; 4348 } 4349 unhash_delegation_locked(dp); 4350 list_add(&dp->dl_recall_lru, &reaplist); 4351 } 4352 spin_unlock(&state_lock); 4353 while (!list_empty(&reaplist)) { 4354 dp = list_first_entry(&reaplist, struct nfs4_delegation, 4355 dl_recall_lru); 4356 list_del_init(&dp->dl_recall_lru); 4357 revoke_delegation(dp); 4358 } 4359 4360 spin_lock(&nn->client_lock); 4361 while (!list_empty(&nn->close_lru)) { 4362 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 4363 oo_close_lru); 4364 if (time_after((unsigned long)oo->oo_time, 4365 (unsigned long)cutoff)) { 4366 t = oo->oo_time - cutoff; 4367 new_timeo = min(new_timeo, t); 4368 break; 4369 } 4370 list_del_init(&oo->oo_close_lru); 4371 stp = oo->oo_last_closed_stid; 4372 oo->oo_last_closed_stid = NULL; 4373 spin_unlock(&nn->client_lock); 4374 nfs4_put_stid(&stp->st_stid); 4375 spin_lock(&nn->client_lock); 4376 } 4377 spin_unlock(&nn->client_lock); 4378 4379 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 4380 return new_timeo; 4381 } 4382 4383 static struct workqueue_struct *laundry_wq; 4384 static void laundromat_main(struct work_struct *); 4385 4386 static void 4387 laundromat_main(struct work_struct *laundry) 4388 { 4389 time_t t; 4390 struct delayed_work *dwork = container_of(laundry, struct delayed_work, 4391 work); 4392 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 4393 laundromat_work); 4394 4395 t = nfs4_laundromat(nn); 4396 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 4397 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 4398 } 4399 4400 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 4401 { 4402 if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) 4403 return nfserr_bad_stateid; 4404 return nfs_ok; 4405 } 4406 4407 static inline int 4408 access_permit_read(struct nfs4_ol_stateid *stp) 4409 { 4410 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 4411 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 4412 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 4413 } 4414 4415 static inline int 4416 access_permit_write(struct nfs4_ol_stateid *stp) 4417 { 4418 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 4419 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 4420 } 4421 4422 static 4423 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 4424 { 4425 __be32 status = nfserr_openmode; 4426 4427 /* For lock stateid's, we test the parent open, not the lock: */ 4428 if (stp->st_openstp) 4429 stp = stp->st_openstp; 4430 if ((flags & WR_STATE) && !access_permit_write(stp)) 4431 goto out; 4432 if ((flags & RD_STATE) && !access_permit_read(stp)) 4433 goto out; 4434 status = nfs_ok; 4435 out: 4436 return status; 4437 } 4438 4439 static inline __be32 4440 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 4441 { 4442 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 4443 return nfs_ok; 4444 else if (locks_in_grace(net)) { 4445 /* Answer in remaining cases depends on existence of 4446 * conflicting state; so we must wait out the grace period. */ 4447 return nfserr_grace; 4448 } else if (flags & WR_STATE) 4449 return nfs4_share_conflict(current_fh, 4450 NFS4_SHARE_DENY_WRITE); 4451 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 4452 return nfs4_share_conflict(current_fh, 4453 NFS4_SHARE_DENY_READ); 4454 } 4455 4456 /* 4457 * Allow READ/WRITE during grace period on recovered state only for files 4458 * that are not able to provide mandatory locking. 4459 */ 4460 static inline int 4461 grace_disallows_io(struct net *net, struct inode *inode) 4462 { 4463 return locks_in_grace(net) && mandatory_lock(inode); 4464 } 4465 4466 /* Returns true iff a is later than b: */ 4467 static bool stateid_generation_after(stateid_t *a, stateid_t *b) 4468 { 4469 return (s32)(a->si_generation - b->si_generation) > 0; 4470 } 4471 4472 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 4473 { 4474 /* 4475 * When sessions are used the stateid generation number is ignored 4476 * when it is zero. 4477 */ 4478 if (has_session && in->si_generation == 0) 4479 return nfs_ok; 4480 4481 if (in->si_generation == ref->si_generation) 4482 return nfs_ok; 4483 4484 /* If the client sends us a stateid from the future, it's buggy: */ 4485 if (stateid_generation_after(in, ref)) 4486 return nfserr_bad_stateid; 4487 /* 4488 * However, we could see a stateid from the past, even from a 4489 * non-buggy client. For example, if the client sends a lock 4490 * while some IO is outstanding, the lock may bump si_generation 4491 * while the IO is still in flight. The client could avoid that 4492 * situation by waiting for responses on all the IO requests, 4493 * but better performance may result in retrying IO that 4494 * receives an old_stateid error if requests are rarely 4495 * reordered in flight: 4496 */ 4497 return nfserr_old_stateid; 4498 } 4499 4500 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 4501 { 4502 if (ols->st_stateowner->so_is_open_owner && 4503 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 4504 return nfserr_bad_stateid; 4505 return nfs_ok; 4506 } 4507 4508 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 4509 { 4510 struct nfs4_stid *s; 4511 __be32 status = nfserr_bad_stateid; 4512 4513 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4514 return status; 4515 /* Client debugging aid. */ 4516 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { 4517 char addr_str[INET6_ADDRSTRLEN]; 4518 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str, 4519 sizeof(addr_str)); 4520 pr_warn_ratelimited("NFSD: client %s testing state ID " 4521 "with incorrect client ID\n", addr_str); 4522 return status; 4523 } 4524 spin_lock(&cl->cl_lock); 4525 s = find_stateid_locked(cl, stateid); 4526 if (!s) 4527 goto out_unlock; 4528 status = check_stateid_generation(stateid, &s->sc_stateid, 1); 4529 if (status) 4530 goto out_unlock; 4531 switch (s->sc_type) { 4532 case NFS4_DELEG_STID: 4533 status = nfs_ok; 4534 break; 4535 case NFS4_REVOKED_DELEG_STID: 4536 status = nfserr_deleg_revoked; 4537 break; 4538 case NFS4_OPEN_STID: 4539 case NFS4_LOCK_STID: 4540 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 4541 break; 4542 default: 4543 printk("unknown stateid type %x\n", s->sc_type); 4544 /* Fallthrough */ 4545 case NFS4_CLOSED_STID: 4546 case NFS4_CLOSED_DELEG_STID: 4547 status = nfserr_bad_stateid; 4548 } 4549 out_unlock: 4550 spin_unlock(&cl->cl_lock); 4551 return status; 4552 } 4553 4554 __be32 4555 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 4556 stateid_t *stateid, unsigned char typemask, 4557 struct nfs4_stid **s, struct nfsd_net *nn) 4558 { 4559 __be32 status; 4560 4561 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4562 return nfserr_bad_stateid; 4563 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); 4564 if (status == nfserr_stale_clientid) { 4565 if (cstate->session) 4566 return nfserr_bad_stateid; 4567 return nfserr_stale_stateid; 4568 } 4569 if (status) 4570 return status; 4571 *s = find_stateid_by_type(cstate->clp, stateid, typemask); 4572 if (!*s) 4573 return nfserr_bad_stateid; 4574 return nfs_ok; 4575 } 4576 4577 /* 4578 * Checks for stateid operations 4579 */ 4580 __be32 4581 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, 4582 stateid_t *stateid, int flags, struct file **filpp) 4583 { 4584 struct nfs4_stid *s; 4585 struct nfs4_ol_stateid *stp = NULL; 4586 struct nfs4_delegation *dp = NULL; 4587 struct svc_fh *current_fh = &cstate->current_fh; 4588 struct inode *ino = d_inode(current_fh->fh_dentry); 4589 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4590 struct file *file = NULL; 4591 __be32 status; 4592 4593 if (filpp) 4594 *filpp = NULL; 4595 4596 if (grace_disallows_io(net, ino)) 4597 return nfserr_grace; 4598 4599 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4600 return check_special_stateids(net, current_fh, stateid, flags); 4601 4602 status = nfsd4_lookup_stateid(cstate, stateid, 4603 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 4604 &s, nn); 4605 if (status) 4606 return status; 4607 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); 4608 if (status) 4609 goto out; 4610 switch (s->sc_type) { 4611 case NFS4_DELEG_STID: 4612 dp = delegstateid(s); 4613 status = nfs4_check_delegmode(dp, flags); 4614 if (status) 4615 goto out; 4616 if (filpp) { 4617 file = dp->dl_stid.sc_file->fi_deleg_file; 4618 if (!file) { 4619 WARN_ON_ONCE(1); 4620 status = nfserr_serverfault; 4621 goto out; 4622 } 4623 get_file(file); 4624 } 4625 break; 4626 case NFS4_OPEN_STID: 4627 case NFS4_LOCK_STID: 4628 stp = openlockstateid(s); 4629 status = nfs4_check_fh(current_fh, stp); 4630 if (status) 4631 goto out; 4632 status = nfsd4_check_openowner_confirmed(stp); 4633 if (status) 4634 goto out; 4635 status = nfs4_check_openmode(stp, flags); 4636 if (status) 4637 goto out; 4638 if (filpp) { 4639 struct nfs4_file *fp = stp->st_stid.sc_file; 4640 4641 if (flags & RD_STATE) 4642 file = find_readable_file(fp); 4643 else 4644 file = find_writeable_file(fp); 4645 } 4646 break; 4647 default: 4648 status = nfserr_bad_stateid; 4649 goto out; 4650 } 4651 status = nfs_ok; 4652 if (file) 4653 *filpp = file; 4654 out: 4655 nfs4_put_stid(s); 4656 return status; 4657 } 4658 4659 /* 4660 * Test if the stateid is valid 4661 */ 4662 __be32 4663 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4664 struct nfsd4_test_stateid *test_stateid) 4665 { 4666 struct nfsd4_test_stateid_id *stateid; 4667 struct nfs4_client *cl = cstate->session->se_client; 4668 4669 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 4670 stateid->ts_id_status = 4671 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 4672 4673 return nfs_ok; 4674 } 4675 4676 __be32 4677 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4678 struct nfsd4_free_stateid *free_stateid) 4679 { 4680 stateid_t *stateid = &free_stateid->fr_stateid; 4681 struct nfs4_stid *s; 4682 struct nfs4_delegation *dp; 4683 struct nfs4_ol_stateid *stp; 4684 struct nfs4_client *cl = cstate->session->se_client; 4685 __be32 ret = nfserr_bad_stateid; 4686 4687 spin_lock(&cl->cl_lock); 4688 s = find_stateid_locked(cl, stateid); 4689 if (!s) 4690 goto out_unlock; 4691 switch (s->sc_type) { 4692 case NFS4_DELEG_STID: 4693 ret = nfserr_locks_held; 4694 break; 4695 case NFS4_OPEN_STID: 4696 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 4697 if (ret) 4698 break; 4699 ret = nfserr_locks_held; 4700 break; 4701 case NFS4_LOCK_STID: 4702 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 4703 if (ret) 4704 break; 4705 stp = openlockstateid(s); 4706 ret = nfserr_locks_held; 4707 if (check_for_locks(stp->st_stid.sc_file, 4708 lockowner(stp->st_stateowner))) 4709 break; 4710 unhash_lock_stateid(stp); 4711 spin_unlock(&cl->cl_lock); 4712 nfs4_put_stid(s); 4713 ret = nfs_ok; 4714 goto out; 4715 case NFS4_REVOKED_DELEG_STID: 4716 dp = delegstateid(s); 4717 list_del_init(&dp->dl_recall_lru); 4718 spin_unlock(&cl->cl_lock); 4719 nfs4_put_stid(s); 4720 ret = nfs_ok; 4721 goto out; 4722 /* Default falls through and returns nfserr_bad_stateid */ 4723 } 4724 out_unlock: 4725 spin_unlock(&cl->cl_lock); 4726 out: 4727 return ret; 4728 } 4729 4730 static inline int 4731 setlkflg (int type) 4732 { 4733 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 4734 RD_STATE : WR_STATE; 4735 } 4736 4737 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 4738 { 4739 struct svc_fh *current_fh = &cstate->current_fh; 4740 struct nfs4_stateowner *sop = stp->st_stateowner; 4741 __be32 status; 4742 4743 status = nfsd4_check_seqid(cstate, sop, seqid); 4744 if (status) 4745 return status; 4746 if (stp->st_stid.sc_type == NFS4_CLOSED_STID 4747 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) 4748 /* 4749 * "Closed" stateid's exist *only* to return 4750 * nfserr_replay_me from the previous step, and 4751 * revoked delegations are kept only for free_stateid. 4752 */ 4753 return nfserr_bad_stateid; 4754 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 4755 if (status) 4756 return status; 4757 return nfs4_check_fh(current_fh, stp); 4758 } 4759 4760 /* 4761 * Checks for sequence id mutating operations. 4762 */ 4763 static __be32 4764 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 4765 stateid_t *stateid, char typemask, 4766 struct nfs4_ol_stateid **stpp, 4767 struct nfsd_net *nn) 4768 { 4769 __be32 status; 4770 struct nfs4_stid *s; 4771 struct nfs4_ol_stateid *stp = NULL; 4772 4773 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, 4774 seqid, STATEID_VAL(stateid)); 4775 4776 *stpp = NULL; 4777 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); 4778 if (status) 4779 return status; 4780 stp = openlockstateid(s); 4781 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); 4782 4783 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 4784 if (!status) 4785 *stpp = stp; 4786 else 4787 nfs4_put_stid(&stp->st_stid); 4788 return status; 4789 } 4790 4791 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 4792 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 4793 { 4794 __be32 status; 4795 struct nfs4_openowner *oo; 4796 struct nfs4_ol_stateid *stp; 4797 4798 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 4799 NFS4_OPEN_STID, &stp, nn); 4800 if (status) 4801 return status; 4802 oo = openowner(stp->st_stateowner); 4803 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 4804 nfs4_put_stid(&stp->st_stid); 4805 return nfserr_bad_stateid; 4806 } 4807 *stpp = stp; 4808 return nfs_ok; 4809 } 4810 4811 __be32 4812 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4813 struct nfsd4_open_confirm *oc) 4814 { 4815 __be32 status; 4816 struct nfs4_openowner *oo; 4817 struct nfs4_ol_stateid *stp; 4818 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4819 4820 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 4821 cstate->current_fh.fh_dentry); 4822 4823 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 4824 if (status) 4825 return status; 4826 4827 status = nfs4_preprocess_seqid_op(cstate, 4828 oc->oc_seqid, &oc->oc_req_stateid, 4829 NFS4_OPEN_STID, &stp, nn); 4830 if (status) 4831 goto out; 4832 oo = openowner(stp->st_stateowner); 4833 status = nfserr_bad_stateid; 4834 if (oo->oo_flags & NFS4_OO_CONFIRMED) 4835 goto put_stateid; 4836 oo->oo_flags |= NFS4_OO_CONFIRMED; 4837 update_stateid(&stp->st_stid.sc_stateid); 4838 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4839 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 4840 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 4841 4842 nfsd4_client_record_create(oo->oo_owner.so_client); 4843 status = nfs_ok; 4844 put_stateid: 4845 nfs4_put_stid(&stp->st_stid); 4846 out: 4847 nfsd4_bump_seqid(cstate, status); 4848 return status; 4849 } 4850 4851 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 4852 { 4853 if (!test_access(access, stp)) 4854 return; 4855 nfs4_file_put_access(stp->st_stid.sc_file, access); 4856 clear_access(access, stp); 4857 } 4858 4859 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 4860 { 4861 switch (to_access) { 4862 case NFS4_SHARE_ACCESS_READ: 4863 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 4864 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 4865 break; 4866 case NFS4_SHARE_ACCESS_WRITE: 4867 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 4868 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 4869 break; 4870 case NFS4_SHARE_ACCESS_BOTH: 4871 break; 4872 default: 4873 WARN_ON_ONCE(1); 4874 } 4875 } 4876 4877 __be32 4878 nfsd4_open_downgrade(struct svc_rqst *rqstp, 4879 struct nfsd4_compound_state *cstate, 4880 struct nfsd4_open_downgrade *od) 4881 { 4882 __be32 status; 4883 struct nfs4_ol_stateid *stp; 4884 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4885 4886 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 4887 cstate->current_fh.fh_dentry); 4888 4889 /* We don't yet support WANT bits: */ 4890 if (od->od_deleg_want) 4891 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 4892 od->od_deleg_want); 4893 4894 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 4895 &od->od_stateid, &stp, nn); 4896 if (status) 4897 goto out; 4898 status = nfserr_inval; 4899 if (!test_access(od->od_share_access, stp)) { 4900 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 4901 stp->st_access_bmap, od->od_share_access); 4902 goto put_stateid; 4903 } 4904 if (!test_deny(od->od_share_deny, stp)) { 4905 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 4906 stp->st_deny_bmap, od->od_share_deny); 4907 goto put_stateid; 4908 } 4909 nfs4_stateid_downgrade(stp, od->od_share_access); 4910 4911 reset_union_bmap_deny(od->od_share_deny, stp); 4912 4913 update_stateid(&stp->st_stid.sc_stateid); 4914 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4915 status = nfs_ok; 4916 put_stateid: 4917 nfs4_put_stid(&stp->st_stid); 4918 out: 4919 nfsd4_bump_seqid(cstate, status); 4920 return status; 4921 } 4922 4923 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 4924 { 4925 struct nfs4_client *clp = s->st_stid.sc_client; 4926 LIST_HEAD(reaplist); 4927 4928 s->st_stid.sc_type = NFS4_CLOSED_STID; 4929 spin_lock(&clp->cl_lock); 4930 unhash_open_stateid(s, &reaplist); 4931 4932 if (clp->cl_minorversion) { 4933 put_ol_stateid_locked(s, &reaplist); 4934 spin_unlock(&clp->cl_lock); 4935 free_ol_stateid_reaplist(&reaplist); 4936 } else { 4937 spin_unlock(&clp->cl_lock); 4938 free_ol_stateid_reaplist(&reaplist); 4939 move_to_close_lru(s, clp->net); 4940 } 4941 } 4942 4943 /* 4944 * nfs4_unlock_state() called after encode 4945 */ 4946 __be32 4947 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4948 struct nfsd4_close *close) 4949 { 4950 __be32 status; 4951 struct nfs4_ol_stateid *stp; 4952 struct net *net = SVC_NET(rqstp); 4953 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4954 4955 dprintk("NFSD: nfsd4_close on file %pd\n", 4956 cstate->current_fh.fh_dentry); 4957 4958 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 4959 &close->cl_stateid, 4960 NFS4_OPEN_STID|NFS4_CLOSED_STID, 4961 &stp, nn); 4962 nfsd4_bump_seqid(cstate, status); 4963 if (status) 4964 goto out; 4965 update_stateid(&stp->st_stid.sc_stateid); 4966 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4967 4968 nfsd4_close_open_stateid(stp); 4969 4970 /* put reference from nfs4_preprocess_seqid_op */ 4971 nfs4_put_stid(&stp->st_stid); 4972 out: 4973 return status; 4974 } 4975 4976 __be32 4977 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4978 struct nfsd4_delegreturn *dr) 4979 { 4980 struct nfs4_delegation *dp; 4981 stateid_t *stateid = &dr->dr_stateid; 4982 struct nfs4_stid *s; 4983 __be32 status; 4984 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4985 4986 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 4987 return status; 4988 4989 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); 4990 if (status) 4991 goto out; 4992 dp = delegstateid(s); 4993 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); 4994 if (status) 4995 goto put_stateid; 4996 4997 destroy_delegation(dp); 4998 put_stateid: 4999 nfs4_put_stid(&dp->dl_stid); 5000 out: 5001 return status; 5002 } 5003 5004 5005 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) 5006 5007 static inline u64 5008 end_offset(u64 start, u64 len) 5009 { 5010 u64 end; 5011 5012 end = start + len; 5013 return end >= start ? end: NFS4_MAX_UINT64; 5014 } 5015 5016 /* last octet in a range */ 5017 static inline u64 5018 last_byte_offset(u64 start, u64 len) 5019 { 5020 u64 end; 5021 5022 WARN_ON_ONCE(!len); 5023 end = start + len; 5024 return end > start ? end - 1: NFS4_MAX_UINT64; 5025 } 5026 5027 /* 5028 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 5029 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 5030 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 5031 * locking, this prevents us from being completely protocol-compliant. The 5032 * real solution to this problem is to start using unsigned file offsets in 5033 * the VFS, but this is a very deep change! 5034 */ 5035 static inline void 5036 nfs4_transform_lock_offset(struct file_lock *lock) 5037 { 5038 if (lock->fl_start < 0) 5039 lock->fl_start = OFFSET_MAX; 5040 if (lock->fl_end < 0) 5041 lock->fl_end = OFFSET_MAX; 5042 } 5043 5044 static fl_owner_t 5045 nfsd4_fl_get_owner(fl_owner_t owner) 5046 { 5047 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 5048 5049 nfs4_get_stateowner(&lo->lo_owner); 5050 return owner; 5051 } 5052 5053 static void 5054 nfsd4_fl_put_owner(fl_owner_t owner) 5055 { 5056 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 5057 5058 if (lo) 5059 nfs4_put_stateowner(&lo->lo_owner); 5060 } 5061 5062 static const struct lock_manager_operations nfsd_posix_mng_ops = { 5063 .lm_get_owner = nfsd4_fl_get_owner, 5064 .lm_put_owner = nfsd4_fl_put_owner, 5065 }; 5066 5067 static inline void 5068 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 5069 { 5070 struct nfs4_lockowner *lo; 5071 5072 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 5073 lo = (struct nfs4_lockowner *) fl->fl_owner; 5074 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data, 5075 lo->lo_owner.so_owner.len, GFP_KERNEL); 5076 if (!deny->ld_owner.data) 5077 /* We just don't care that much */ 5078 goto nevermind; 5079 deny->ld_owner.len = lo->lo_owner.so_owner.len; 5080 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 5081 } else { 5082 nevermind: 5083 deny->ld_owner.len = 0; 5084 deny->ld_owner.data = NULL; 5085 deny->ld_clientid.cl_boot = 0; 5086 deny->ld_clientid.cl_id = 0; 5087 } 5088 deny->ld_start = fl->fl_start; 5089 deny->ld_length = NFS4_MAX_UINT64; 5090 if (fl->fl_end != NFS4_MAX_UINT64) 5091 deny->ld_length = fl->fl_end - fl->fl_start + 1; 5092 deny->ld_type = NFS4_READ_LT; 5093 if (fl->fl_type != F_RDLCK) 5094 deny->ld_type = NFS4_WRITE_LT; 5095 } 5096 5097 static struct nfs4_lockowner * 5098 find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner, 5099 struct nfs4_client *clp) 5100 { 5101 unsigned int strhashval = ownerstr_hashval(owner); 5102 struct nfs4_stateowner *so; 5103 5104 lockdep_assert_held(&clp->cl_lock); 5105 5106 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 5107 so_strhash) { 5108 if (so->so_is_open_owner) 5109 continue; 5110 if (same_owner_str(so, owner)) 5111 return lockowner(nfs4_get_stateowner(so)); 5112 } 5113 return NULL; 5114 } 5115 5116 static struct nfs4_lockowner * 5117 find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner, 5118 struct nfs4_client *clp) 5119 { 5120 struct nfs4_lockowner *lo; 5121 5122 spin_lock(&clp->cl_lock); 5123 lo = find_lockowner_str_locked(clid, owner, clp); 5124 spin_unlock(&clp->cl_lock); 5125 return lo; 5126 } 5127 5128 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 5129 { 5130 unhash_lockowner_locked(lockowner(sop)); 5131 } 5132 5133 static void nfs4_free_lockowner(struct nfs4_stateowner *sop) 5134 { 5135 struct nfs4_lockowner *lo = lockowner(sop); 5136 5137 kmem_cache_free(lockowner_slab, lo); 5138 } 5139 5140 static const struct nfs4_stateowner_operations lockowner_ops = { 5141 .so_unhash = nfs4_unhash_lockowner, 5142 .so_free = nfs4_free_lockowner, 5143 }; 5144 5145 /* 5146 * Alloc a lock owner structure. 5147 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 5148 * occurred. 5149 * 5150 * strhashval = ownerstr_hashval 5151 */ 5152 static struct nfs4_lockowner * 5153 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 5154 struct nfs4_ol_stateid *open_stp, 5155 struct nfsd4_lock *lock) 5156 { 5157 struct nfs4_lockowner *lo, *ret; 5158 5159 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 5160 if (!lo) 5161 return NULL; 5162 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 5163 lo->lo_owner.so_is_open_owner = 0; 5164 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 5165 lo->lo_owner.so_ops = &lockowner_ops; 5166 spin_lock(&clp->cl_lock); 5167 ret = find_lockowner_str_locked(&clp->cl_clientid, 5168 &lock->lk_new_owner, clp); 5169 if (ret == NULL) { 5170 list_add(&lo->lo_owner.so_strhash, 5171 &clp->cl_ownerstr_hashtbl[strhashval]); 5172 ret = lo; 5173 } else 5174 nfs4_free_lockowner(&lo->lo_owner); 5175 spin_unlock(&clp->cl_lock); 5176 return ret; 5177 } 5178 5179 static void 5180 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 5181 struct nfs4_file *fp, struct inode *inode, 5182 struct nfs4_ol_stateid *open_stp) 5183 { 5184 struct nfs4_client *clp = lo->lo_owner.so_client; 5185 5186 lockdep_assert_held(&clp->cl_lock); 5187 5188 atomic_inc(&stp->st_stid.sc_count); 5189 stp->st_stid.sc_type = NFS4_LOCK_STID; 5190 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 5191 get_nfs4_file(fp); 5192 stp->st_stid.sc_file = fp; 5193 stp->st_stid.sc_free = nfs4_free_lock_stateid; 5194 stp->st_access_bmap = 0; 5195 stp->st_deny_bmap = open_stp->st_deny_bmap; 5196 stp->st_openstp = open_stp; 5197 list_add(&stp->st_locks, &open_stp->st_locks); 5198 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 5199 spin_lock(&fp->fi_lock); 5200 list_add(&stp->st_perfile, &fp->fi_stateids); 5201 spin_unlock(&fp->fi_lock); 5202 } 5203 5204 static struct nfs4_ol_stateid * 5205 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) 5206 { 5207 struct nfs4_ol_stateid *lst; 5208 struct nfs4_client *clp = lo->lo_owner.so_client; 5209 5210 lockdep_assert_held(&clp->cl_lock); 5211 5212 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { 5213 if (lst->st_stid.sc_file == fp) { 5214 atomic_inc(&lst->st_stid.sc_count); 5215 return lst; 5216 } 5217 } 5218 return NULL; 5219 } 5220 5221 static struct nfs4_ol_stateid * 5222 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, 5223 struct inode *inode, struct nfs4_ol_stateid *ost, 5224 bool *new) 5225 { 5226 struct nfs4_stid *ns = NULL; 5227 struct nfs4_ol_stateid *lst; 5228 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 5229 struct nfs4_client *clp = oo->oo_owner.so_client; 5230 5231 spin_lock(&clp->cl_lock); 5232 lst = find_lock_stateid(lo, fi); 5233 if (lst == NULL) { 5234 spin_unlock(&clp->cl_lock); 5235 ns = nfs4_alloc_stid(clp, stateid_slab); 5236 if (ns == NULL) 5237 return NULL; 5238 5239 spin_lock(&clp->cl_lock); 5240 lst = find_lock_stateid(lo, fi); 5241 if (likely(!lst)) { 5242 lst = openlockstateid(ns); 5243 init_lock_stateid(lst, lo, fi, inode, ost); 5244 ns = NULL; 5245 *new = true; 5246 } 5247 } 5248 spin_unlock(&clp->cl_lock); 5249 if (ns) 5250 nfs4_put_stid(ns); 5251 return lst; 5252 } 5253 5254 static int 5255 check_lock_length(u64 offset, u64 length) 5256 { 5257 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 5258 LOFF_OVERFLOW(offset, length))); 5259 } 5260 5261 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 5262 { 5263 struct nfs4_file *fp = lock_stp->st_stid.sc_file; 5264 5265 lockdep_assert_held(&fp->fi_lock); 5266 5267 if (test_access(access, lock_stp)) 5268 return; 5269 __nfs4_file_get_access(fp, access); 5270 set_access(access, lock_stp); 5271 } 5272 5273 static __be32 5274 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 5275 struct nfs4_ol_stateid *ost, 5276 struct nfsd4_lock *lock, 5277 struct nfs4_ol_stateid **lst, bool *new) 5278 { 5279 __be32 status; 5280 struct nfs4_file *fi = ost->st_stid.sc_file; 5281 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 5282 struct nfs4_client *cl = oo->oo_owner.so_client; 5283 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 5284 struct nfs4_lockowner *lo; 5285 unsigned int strhashval; 5286 5287 lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl); 5288 if (!lo) { 5289 strhashval = ownerstr_hashval(&lock->v.new.owner); 5290 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 5291 if (lo == NULL) 5292 return nfserr_jukebox; 5293 } else { 5294 /* with an existing lockowner, seqids must be the same */ 5295 status = nfserr_bad_seqid; 5296 if (!cstate->minorversion && 5297 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 5298 goto out; 5299 } 5300 5301 *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 5302 if (*lst == NULL) { 5303 status = nfserr_jukebox; 5304 goto out; 5305 } 5306 status = nfs_ok; 5307 out: 5308 nfs4_put_stateowner(&lo->lo_owner); 5309 return status; 5310 } 5311 5312 /* 5313 * LOCK operation 5314 */ 5315 __be32 5316 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5317 struct nfsd4_lock *lock) 5318 { 5319 struct nfs4_openowner *open_sop = NULL; 5320 struct nfs4_lockowner *lock_sop = NULL; 5321 struct nfs4_ol_stateid *lock_stp = NULL; 5322 struct nfs4_ol_stateid *open_stp = NULL; 5323 struct nfs4_file *fp; 5324 struct file *filp = NULL; 5325 struct file_lock *file_lock = NULL; 5326 struct file_lock *conflock = NULL; 5327 __be32 status = 0; 5328 int lkflg; 5329 int err; 5330 bool new = false; 5331 struct net *net = SVC_NET(rqstp); 5332 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5333 5334 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 5335 (long long) lock->lk_offset, 5336 (long long) lock->lk_length); 5337 5338 if (check_lock_length(lock->lk_offset, lock->lk_length)) 5339 return nfserr_inval; 5340 5341 if ((status = fh_verify(rqstp, &cstate->current_fh, 5342 S_IFREG, NFSD_MAY_LOCK))) { 5343 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 5344 return status; 5345 } 5346 5347 if (lock->lk_is_new) { 5348 if (nfsd4_has_session(cstate)) 5349 /* See rfc 5661 18.10.3: given clientid is ignored: */ 5350 memcpy(&lock->v.new.clientid, 5351 &cstate->session->se_client->cl_clientid, 5352 sizeof(clientid_t)); 5353 5354 status = nfserr_stale_clientid; 5355 if (STALE_CLIENTID(&lock->lk_new_clientid, nn)) 5356 goto out; 5357 5358 /* validate and update open stateid and open seqid */ 5359 status = nfs4_preprocess_confirmed_seqid_op(cstate, 5360 lock->lk_new_open_seqid, 5361 &lock->lk_new_open_stateid, 5362 &open_stp, nn); 5363 if (status) 5364 goto out; 5365 open_sop = openowner(open_stp->st_stateowner); 5366 status = nfserr_bad_stateid; 5367 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 5368 &lock->v.new.clientid)) 5369 goto out; 5370 status = lookup_or_create_lock_state(cstate, open_stp, lock, 5371 &lock_stp, &new); 5372 } else { 5373 status = nfs4_preprocess_seqid_op(cstate, 5374 lock->lk_old_lock_seqid, 5375 &lock->lk_old_lock_stateid, 5376 NFS4_LOCK_STID, &lock_stp, nn); 5377 } 5378 if (status) 5379 goto out; 5380 lock_sop = lockowner(lock_stp->st_stateowner); 5381 5382 lkflg = setlkflg(lock->lk_type); 5383 status = nfs4_check_openmode(lock_stp, lkflg); 5384 if (status) 5385 goto out; 5386 5387 status = nfserr_grace; 5388 if (locks_in_grace(net) && !lock->lk_reclaim) 5389 goto out; 5390 status = nfserr_no_grace; 5391 if (!locks_in_grace(net) && lock->lk_reclaim) 5392 goto out; 5393 5394 file_lock = locks_alloc_lock(); 5395 if (!file_lock) { 5396 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5397 status = nfserr_jukebox; 5398 goto out; 5399 } 5400 5401 fp = lock_stp->st_stid.sc_file; 5402 switch (lock->lk_type) { 5403 case NFS4_READ_LT: 5404 case NFS4_READW_LT: 5405 spin_lock(&fp->fi_lock); 5406 filp = find_readable_file_locked(fp); 5407 if (filp) 5408 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 5409 spin_unlock(&fp->fi_lock); 5410 file_lock->fl_type = F_RDLCK; 5411 break; 5412 case NFS4_WRITE_LT: 5413 case NFS4_WRITEW_LT: 5414 spin_lock(&fp->fi_lock); 5415 filp = find_writeable_file_locked(fp); 5416 if (filp) 5417 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 5418 spin_unlock(&fp->fi_lock); 5419 file_lock->fl_type = F_WRLCK; 5420 break; 5421 default: 5422 status = nfserr_inval; 5423 goto out; 5424 } 5425 if (!filp) { 5426 status = nfserr_openmode; 5427 goto out; 5428 } 5429 5430 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 5431 file_lock->fl_pid = current->tgid; 5432 file_lock->fl_file = filp; 5433 file_lock->fl_flags = FL_POSIX; 5434 file_lock->fl_lmops = &nfsd_posix_mng_ops; 5435 file_lock->fl_start = lock->lk_offset; 5436 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 5437 nfs4_transform_lock_offset(file_lock); 5438 5439 conflock = locks_alloc_lock(); 5440 if (!conflock) { 5441 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5442 status = nfserr_jukebox; 5443 goto out; 5444 } 5445 5446 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); 5447 switch (-err) { 5448 case 0: /* success! */ 5449 update_stateid(&lock_stp->st_stid.sc_stateid); 5450 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 5451 sizeof(stateid_t)); 5452 status = 0; 5453 break; 5454 case (EAGAIN): /* conflock holds conflicting lock */ 5455 status = nfserr_denied; 5456 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 5457 nfs4_set_lock_denied(conflock, &lock->lk_denied); 5458 break; 5459 case (EDEADLK): 5460 status = nfserr_deadlock; 5461 break; 5462 default: 5463 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 5464 status = nfserrno(err); 5465 break; 5466 } 5467 out: 5468 if (filp) 5469 fput(filp); 5470 if (lock_stp) { 5471 /* Bump seqid manually if the 4.0 replay owner is openowner */ 5472 if (cstate->replay_owner && 5473 cstate->replay_owner != &lock_sop->lo_owner && 5474 seqid_mutating_err(ntohl(status))) 5475 lock_sop->lo_owner.so_seqid++; 5476 5477 /* 5478 * If this is a new, never-before-used stateid, and we are 5479 * returning an error, then just go ahead and release it. 5480 */ 5481 if (status && new) 5482 release_lock_stateid(lock_stp); 5483 5484 nfs4_put_stid(&lock_stp->st_stid); 5485 } 5486 if (open_stp) 5487 nfs4_put_stid(&open_stp->st_stid); 5488 nfsd4_bump_seqid(cstate, status); 5489 if (file_lock) 5490 locks_free_lock(file_lock); 5491 if (conflock) 5492 locks_free_lock(conflock); 5493 return status; 5494 } 5495 5496 /* 5497 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 5498 * so we do a temporary open here just to get an open file to pass to 5499 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 5500 * inode operation.) 5501 */ 5502 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 5503 { 5504 struct file *file; 5505 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 5506 if (!err) { 5507 err = nfserrno(vfs_test_lock(file, lock)); 5508 nfsd_close(file); 5509 } 5510 return err; 5511 } 5512 5513 /* 5514 * LOCKT operation 5515 */ 5516 __be32 5517 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5518 struct nfsd4_lockt *lockt) 5519 { 5520 struct file_lock *file_lock = NULL; 5521 struct nfs4_lockowner *lo = NULL; 5522 __be32 status; 5523 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5524 5525 if (locks_in_grace(SVC_NET(rqstp))) 5526 return nfserr_grace; 5527 5528 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 5529 return nfserr_inval; 5530 5531 if (!nfsd4_has_session(cstate)) { 5532 status = lookup_clientid(&lockt->lt_clientid, cstate, nn); 5533 if (status) 5534 goto out; 5535 } 5536 5537 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 5538 goto out; 5539 5540 file_lock = locks_alloc_lock(); 5541 if (!file_lock) { 5542 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5543 status = nfserr_jukebox; 5544 goto out; 5545 } 5546 5547 switch (lockt->lt_type) { 5548 case NFS4_READ_LT: 5549 case NFS4_READW_LT: 5550 file_lock->fl_type = F_RDLCK; 5551 break; 5552 case NFS4_WRITE_LT: 5553 case NFS4_WRITEW_LT: 5554 file_lock->fl_type = F_WRLCK; 5555 break; 5556 default: 5557 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 5558 status = nfserr_inval; 5559 goto out; 5560 } 5561 5562 lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, 5563 cstate->clp); 5564 if (lo) 5565 file_lock->fl_owner = (fl_owner_t)lo; 5566 file_lock->fl_pid = current->tgid; 5567 file_lock->fl_flags = FL_POSIX; 5568 5569 file_lock->fl_start = lockt->lt_offset; 5570 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 5571 5572 nfs4_transform_lock_offset(file_lock); 5573 5574 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 5575 if (status) 5576 goto out; 5577 5578 if (file_lock->fl_type != F_UNLCK) { 5579 status = nfserr_denied; 5580 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 5581 } 5582 out: 5583 if (lo) 5584 nfs4_put_stateowner(&lo->lo_owner); 5585 if (file_lock) 5586 locks_free_lock(file_lock); 5587 return status; 5588 } 5589 5590 __be32 5591 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5592 struct nfsd4_locku *locku) 5593 { 5594 struct nfs4_ol_stateid *stp; 5595 struct file *filp = NULL; 5596 struct file_lock *file_lock = NULL; 5597 __be32 status; 5598 int err; 5599 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5600 5601 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 5602 (long long) locku->lu_offset, 5603 (long long) locku->lu_length); 5604 5605 if (check_lock_length(locku->lu_offset, locku->lu_length)) 5606 return nfserr_inval; 5607 5608 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 5609 &locku->lu_stateid, NFS4_LOCK_STID, 5610 &stp, nn); 5611 if (status) 5612 goto out; 5613 filp = find_any_file(stp->st_stid.sc_file); 5614 if (!filp) { 5615 status = nfserr_lock_range; 5616 goto put_stateid; 5617 } 5618 file_lock = locks_alloc_lock(); 5619 if (!file_lock) { 5620 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5621 status = nfserr_jukebox; 5622 goto fput; 5623 } 5624 5625 file_lock->fl_type = F_UNLCK; 5626 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); 5627 file_lock->fl_pid = current->tgid; 5628 file_lock->fl_file = filp; 5629 file_lock->fl_flags = FL_POSIX; 5630 file_lock->fl_lmops = &nfsd_posix_mng_ops; 5631 file_lock->fl_start = locku->lu_offset; 5632 5633 file_lock->fl_end = last_byte_offset(locku->lu_offset, 5634 locku->lu_length); 5635 nfs4_transform_lock_offset(file_lock); 5636 5637 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); 5638 if (err) { 5639 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 5640 goto out_nfserr; 5641 } 5642 update_stateid(&stp->st_stid.sc_stateid); 5643 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 5644 fput: 5645 fput(filp); 5646 put_stateid: 5647 nfs4_put_stid(&stp->st_stid); 5648 out: 5649 nfsd4_bump_seqid(cstate, status); 5650 if (file_lock) 5651 locks_free_lock(file_lock); 5652 return status; 5653 5654 out_nfserr: 5655 status = nfserrno(err); 5656 goto fput; 5657 } 5658 5659 /* 5660 * returns 5661 * true: locks held by lockowner 5662 * false: no locks held by lockowner 5663 */ 5664 static bool 5665 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 5666 { 5667 struct file_lock *fl; 5668 int status = false; 5669 struct file *filp = find_any_file(fp); 5670 struct inode *inode; 5671 struct file_lock_context *flctx; 5672 5673 if (!filp) { 5674 /* Any valid lock stateid should have some sort of access */ 5675 WARN_ON_ONCE(1); 5676 return status; 5677 } 5678 5679 inode = file_inode(filp); 5680 flctx = inode->i_flctx; 5681 5682 if (flctx && !list_empty_careful(&flctx->flc_posix)) { 5683 spin_lock(&flctx->flc_lock); 5684 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 5685 if (fl->fl_owner == (fl_owner_t)lowner) { 5686 status = true; 5687 break; 5688 } 5689 } 5690 spin_unlock(&flctx->flc_lock); 5691 } 5692 fput(filp); 5693 return status; 5694 } 5695 5696 __be32 5697 nfsd4_release_lockowner(struct svc_rqst *rqstp, 5698 struct nfsd4_compound_state *cstate, 5699 struct nfsd4_release_lockowner *rlockowner) 5700 { 5701 clientid_t *clid = &rlockowner->rl_clientid; 5702 struct nfs4_stateowner *sop; 5703 struct nfs4_lockowner *lo = NULL; 5704 struct nfs4_ol_stateid *stp; 5705 struct xdr_netobj *owner = &rlockowner->rl_owner; 5706 unsigned int hashval = ownerstr_hashval(owner); 5707 __be32 status; 5708 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5709 struct nfs4_client *clp; 5710 5711 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 5712 clid->cl_boot, clid->cl_id); 5713 5714 status = lookup_clientid(clid, cstate, nn); 5715 if (status) 5716 return status; 5717 5718 clp = cstate->clp; 5719 /* Find the matching lock stateowner */ 5720 spin_lock(&clp->cl_lock); 5721 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval], 5722 so_strhash) { 5723 5724 if (sop->so_is_open_owner || !same_owner_str(sop, owner)) 5725 continue; 5726 5727 /* see if there are still any locks associated with it */ 5728 lo = lockowner(sop); 5729 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { 5730 if (check_for_locks(stp->st_stid.sc_file, lo)) { 5731 status = nfserr_locks_held; 5732 spin_unlock(&clp->cl_lock); 5733 return status; 5734 } 5735 } 5736 5737 nfs4_get_stateowner(sop); 5738 break; 5739 } 5740 spin_unlock(&clp->cl_lock); 5741 if (lo) 5742 release_lockowner(lo); 5743 return status; 5744 } 5745 5746 static inline struct nfs4_client_reclaim * 5747 alloc_reclaim(void) 5748 { 5749 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 5750 } 5751 5752 bool 5753 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn) 5754 { 5755 struct nfs4_client_reclaim *crp; 5756 5757 crp = nfsd4_find_reclaim_client(name, nn); 5758 return (crp && crp->cr_clp); 5759 } 5760 5761 /* 5762 * failure => all reset bets are off, nfserr_no_grace... 5763 */ 5764 struct nfs4_client_reclaim * 5765 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn) 5766 { 5767 unsigned int strhashval; 5768 struct nfs4_client_reclaim *crp; 5769 5770 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); 5771 crp = alloc_reclaim(); 5772 if (crp) { 5773 strhashval = clientstr_hashval(name); 5774 INIT_LIST_HEAD(&crp->cr_strhash); 5775 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 5776 memcpy(crp->cr_recdir, name, HEXDIR_LEN); 5777 crp->cr_clp = NULL; 5778 nn->reclaim_str_hashtbl_size++; 5779 } 5780 return crp; 5781 } 5782 5783 void 5784 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 5785 { 5786 list_del(&crp->cr_strhash); 5787 kfree(crp); 5788 nn->reclaim_str_hashtbl_size--; 5789 } 5790 5791 void 5792 nfs4_release_reclaim(struct nfsd_net *nn) 5793 { 5794 struct nfs4_client_reclaim *crp = NULL; 5795 int i; 5796 5797 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 5798 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 5799 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 5800 struct nfs4_client_reclaim, cr_strhash); 5801 nfs4_remove_reclaim_record(crp, nn); 5802 } 5803 } 5804 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 5805 } 5806 5807 /* 5808 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 5809 struct nfs4_client_reclaim * 5810 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn) 5811 { 5812 unsigned int strhashval; 5813 struct nfs4_client_reclaim *crp = NULL; 5814 5815 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir); 5816 5817 strhashval = clientstr_hashval(recdir); 5818 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 5819 if (same_name(crp->cr_recdir, recdir)) { 5820 return crp; 5821 } 5822 } 5823 return NULL; 5824 } 5825 5826 /* 5827 * Called from OPEN. Look for clientid in reclaim list. 5828 */ 5829 __be32 5830 nfs4_check_open_reclaim(clientid_t *clid, 5831 struct nfsd4_compound_state *cstate, 5832 struct nfsd_net *nn) 5833 { 5834 __be32 status; 5835 5836 /* find clientid in conf_id_hashtbl */ 5837 status = lookup_clientid(clid, cstate, nn); 5838 if (status) 5839 return nfserr_reclaim_bad; 5840 5841 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags)) 5842 return nfserr_no_grace; 5843 5844 if (nfsd4_client_record_check(cstate->clp)) 5845 return nfserr_reclaim_bad; 5846 5847 return nfs_ok; 5848 } 5849 5850 #ifdef CONFIG_NFSD_FAULT_INJECTION 5851 static inline void 5852 put_client(struct nfs4_client *clp) 5853 { 5854 atomic_dec(&clp->cl_refcount); 5855 } 5856 5857 static struct nfs4_client * 5858 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) 5859 { 5860 struct nfs4_client *clp; 5861 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 5862 nfsd_net_id); 5863 5864 if (!nfsd_netns_ready(nn)) 5865 return NULL; 5866 5867 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 5868 if (memcmp(&clp->cl_addr, addr, addr_size) == 0) 5869 return clp; 5870 } 5871 return NULL; 5872 } 5873 5874 u64 5875 nfsd_inject_print_clients(void) 5876 { 5877 struct nfs4_client *clp; 5878 u64 count = 0; 5879 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 5880 nfsd_net_id); 5881 char buf[INET6_ADDRSTRLEN]; 5882 5883 if (!nfsd_netns_ready(nn)) 5884 return 0; 5885 5886 spin_lock(&nn->client_lock); 5887 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 5888 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 5889 pr_info("NFS Client: %s\n", buf); 5890 ++count; 5891 } 5892 spin_unlock(&nn->client_lock); 5893 5894 return count; 5895 } 5896 5897 u64 5898 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size) 5899 { 5900 u64 count = 0; 5901 struct nfs4_client *clp; 5902 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 5903 nfsd_net_id); 5904 5905 if (!nfsd_netns_ready(nn)) 5906 return count; 5907 5908 spin_lock(&nn->client_lock); 5909 clp = nfsd_find_client(addr, addr_size); 5910 if (clp) { 5911 if (mark_client_expired_locked(clp) == nfs_ok) 5912 ++count; 5913 else 5914 clp = NULL; 5915 } 5916 spin_unlock(&nn->client_lock); 5917 5918 if (clp) 5919 expire_client(clp); 5920 5921 return count; 5922 } 5923 5924 u64 5925 nfsd_inject_forget_clients(u64 max) 5926 { 5927 u64 count = 0; 5928 struct nfs4_client *clp, *next; 5929 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 5930 nfsd_net_id); 5931 LIST_HEAD(reaplist); 5932 5933 if (!nfsd_netns_ready(nn)) 5934 return count; 5935 5936 spin_lock(&nn->client_lock); 5937 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { 5938 if (mark_client_expired_locked(clp) == nfs_ok) { 5939 list_add(&clp->cl_lru, &reaplist); 5940 if (max != 0 && ++count >= max) 5941 break; 5942 } 5943 } 5944 spin_unlock(&nn->client_lock); 5945 5946 list_for_each_entry_safe(clp, next, &reaplist, cl_lru) 5947 expire_client(clp); 5948 5949 return count; 5950 } 5951 5952 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, 5953 const char *type) 5954 { 5955 char buf[INET6_ADDRSTRLEN]; 5956 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 5957 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); 5958 } 5959 5960 static void 5961 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, 5962 struct list_head *collect) 5963 { 5964 struct nfs4_client *clp = lst->st_stid.sc_client; 5965 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 5966 nfsd_net_id); 5967 5968 if (!collect) 5969 return; 5970 5971 lockdep_assert_held(&nn->client_lock); 5972 atomic_inc(&clp->cl_refcount); 5973 list_add(&lst->st_locks, collect); 5974 } 5975 5976 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, 5977 struct list_head *collect, 5978 void (*func)(struct nfs4_ol_stateid *)) 5979 { 5980 struct nfs4_openowner *oop; 5981 struct nfs4_ol_stateid *stp, *st_next; 5982 struct nfs4_ol_stateid *lst, *lst_next; 5983 u64 count = 0; 5984 5985 spin_lock(&clp->cl_lock); 5986 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { 5987 list_for_each_entry_safe(stp, st_next, 5988 &oop->oo_owner.so_stateids, st_perstateowner) { 5989 list_for_each_entry_safe(lst, lst_next, 5990 &stp->st_locks, st_locks) { 5991 if (func) { 5992 func(lst); 5993 nfsd_inject_add_lock_to_list(lst, 5994 collect); 5995 } 5996 ++count; 5997 /* 5998 * Despite the fact that these functions deal 5999 * with 64-bit integers for "count", we must 6000 * ensure that it doesn't blow up the 6001 * clp->cl_refcount. Throw a warning if we 6002 * start to approach INT_MAX here. 6003 */ 6004 WARN_ON_ONCE(count == (INT_MAX / 2)); 6005 if (count == max) 6006 goto out; 6007 } 6008 } 6009 } 6010 out: 6011 spin_unlock(&clp->cl_lock); 6012 6013 return count; 6014 } 6015 6016 static u64 6017 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect, 6018 u64 max) 6019 { 6020 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid); 6021 } 6022 6023 static u64 6024 nfsd_print_client_locks(struct nfs4_client *clp) 6025 { 6026 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL); 6027 nfsd_print_count(clp, count, "locked files"); 6028 return count; 6029 } 6030 6031 u64 6032 nfsd_inject_print_locks(void) 6033 { 6034 struct nfs4_client *clp; 6035 u64 count = 0; 6036 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6037 nfsd_net_id); 6038 6039 if (!nfsd_netns_ready(nn)) 6040 return 0; 6041 6042 spin_lock(&nn->client_lock); 6043 list_for_each_entry(clp, &nn->client_lru, cl_lru) 6044 count += nfsd_print_client_locks(clp); 6045 spin_unlock(&nn->client_lock); 6046 6047 return count; 6048 } 6049 6050 static void 6051 nfsd_reap_locks(struct list_head *reaplist) 6052 { 6053 struct nfs4_client *clp; 6054 struct nfs4_ol_stateid *stp, *next; 6055 6056 list_for_each_entry_safe(stp, next, reaplist, st_locks) { 6057 list_del_init(&stp->st_locks); 6058 clp = stp->st_stid.sc_client; 6059 nfs4_put_stid(&stp->st_stid); 6060 put_client(clp); 6061 } 6062 } 6063 6064 u64 6065 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size) 6066 { 6067 unsigned int count = 0; 6068 struct nfs4_client *clp; 6069 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6070 nfsd_net_id); 6071 LIST_HEAD(reaplist); 6072 6073 if (!nfsd_netns_ready(nn)) 6074 return count; 6075 6076 spin_lock(&nn->client_lock); 6077 clp = nfsd_find_client(addr, addr_size); 6078 if (clp) 6079 count = nfsd_collect_client_locks(clp, &reaplist, 0); 6080 spin_unlock(&nn->client_lock); 6081 nfsd_reap_locks(&reaplist); 6082 return count; 6083 } 6084 6085 u64 6086 nfsd_inject_forget_locks(u64 max) 6087 { 6088 u64 count = 0; 6089 struct nfs4_client *clp; 6090 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6091 nfsd_net_id); 6092 LIST_HEAD(reaplist); 6093 6094 if (!nfsd_netns_ready(nn)) 6095 return count; 6096 6097 spin_lock(&nn->client_lock); 6098 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6099 count += nfsd_collect_client_locks(clp, &reaplist, max - count); 6100 if (max != 0 && count >= max) 6101 break; 6102 } 6103 spin_unlock(&nn->client_lock); 6104 nfsd_reap_locks(&reaplist); 6105 return count; 6106 } 6107 6108 static u64 6109 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max, 6110 struct list_head *collect, 6111 void (*func)(struct nfs4_openowner *)) 6112 { 6113 struct nfs4_openowner *oop, *next; 6114 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6115 nfsd_net_id); 6116 u64 count = 0; 6117 6118 lockdep_assert_held(&nn->client_lock); 6119 6120 spin_lock(&clp->cl_lock); 6121 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { 6122 if (func) { 6123 func(oop); 6124 if (collect) { 6125 atomic_inc(&clp->cl_refcount); 6126 list_add(&oop->oo_perclient, collect); 6127 } 6128 } 6129 ++count; 6130 /* 6131 * Despite the fact that these functions deal with 6132 * 64-bit integers for "count", we must ensure that 6133 * it doesn't blow up the clp->cl_refcount. Throw a 6134 * warning if we start to approach INT_MAX here. 6135 */ 6136 WARN_ON_ONCE(count == (INT_MAX / 2)); 6137 if (count == max) 6138 break; 6139 } 6140 spin_unlock(&clp->cl_lock); 6141 6142 return count; 6143 } 6144 6145 static u64 6146 nfsd_print_client_openowners(struct nfs4_client *clp) 6147 { 6148 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL); 6149 6150 nfsd_print_count(clp, count, "openowners"); 6151 return count; 6152 } 6153 6154 static u64 6155 nfsd_collect_client_openowners(struct nfs4_client *clp, 6156 struct list_head *collect, u64 max) 6157 { 6158 return nfsd_foreach_client_openowner(clp, max, collect, 6159 unhash_openowner_locked); 6160 } 6161 6162 u64 6163 nfsd_inject_print_openowners(void) 6164 { 6165 struct nfs4_client *clp; 6166 u64 count = 0; 6167 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6168 nfsd_net_id); 6169 6170 if (!nfsd_netns_ready(nn)) 6171 return 0; 6172 6173 spin_lock(&nn->client_lock); 6174 list_for_each_entry(clp, &nn->client_lru, cl_lru) 6175 count += nfsd_print_client_openowners(clp); 6176 spin_unlock(&nn->client_lock); 6177 6178 return count; 6179 } 6180 6181 static void 6182 nfsd_reap_openowners(struct list_head *reaplist) 6183 { 6184 struct nfs4_client *clp; 6185 struct nfs4_openowner *oop, *next; 6186 6187 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) { 6188 list_del_init(&oop->oo_perclient); 6189 clp = oop->oo_owner.so_client; 6190 release_openowner(oop); 6191 put_client(clp); 6192 } 6193 } 6194 6195 u64 6196 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr, 6197 size_t addr_size) 6198 { 6199 unsigned int count = 0; 6200 struct nfs4_client *clp; 6201 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6202 nfsd_net_id); 6203 LIST_HEAD(reaplist); 6204 6205 if (!nfsd_netns_ready(nn)) 6206 return count; 6207 6208 spin_lock(&nn->client_lock); 6209 clp = nfsd_find_client(addr, addr_size); 6210 if (clp) 6211 count = nfsd_collect_client_openowners(clp, &reaplist, 0); 6212 spin_unlock(&nn->client_lock); 6213 nfsd_reap_openowners(&reaplist); 6214 return count; 6215 } 6216 6217 u64 6218 nfsd_inject_forget_openowners(u64 max) 6219 { 6220 u64 count = 0; 6221 struct nfs4_client *clp; 6222 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6223 nfsd_net_id); 6224 LIST_HEAD(reaplist); 6225 6226 if (!nfsd_netns_ready(nn)) 6227 return count; 6228 6229 spin_lock(&nn->client_lock); 6230 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6231 count += nfsd_collect_client_openowners(clp, &reaplist, 6232 max - count); 6233 if (max != 0 && count >= max) 6234 break; 6235 } 6236 spin_unlock(&nn->client_lock); 6237 nfsd_reap_openowners(&reaplist); 6238 return count; 6239 } 6240 6241 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, 6242 struct list_head *victims) 6243 { 6244 struct nfs4_delegation *dp, *next; 6245 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6246 nfsd_net_id); 6247 u64 count = 0; 6248 6249 lockdep_assert_held(&nn->client_lock); 6250 6251 spin_lock(&state_lock); 6252 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { 6253 if (victims) { 6254 /* 6255 * It's not safe to mess with delegations that have a 6256 * non-zero dl_time. They might have already been broken 6257 * and could be processed by the laundromat outside of 6258 * the state_lock. Just leave them be. 6259 */ 6260 if (dp->dl_time != 0) 6261 continue; 6262 6263 atomic_inc(&clp->cl_refcount); 6264 unhash_delegation_locked(dp); 6265 list_add(&dp->dl_recall_lru, victims); 6266 } 6267 ++count; 6268 /* 6269 * Despite the fact that these functions deal with 6270 * 64-bit integers for "count", we must ensure that 6271 * it doesn't blow up the clp->cl_refcount. Throw a 6272 * warning if we start to approach INT_MAX here. 6273 */ 6274 WARN_ON_ONCE(count == (INT_MAX / 2)); 6275 if (count == max) 6276 break; 6277 } 6278 spin_unlock(&state_lock); 6279 return count; 6280 } 6281 6282 static u64 6283 nfsd_print_client_delegations(struct nfs4_client *clp) 6284 { 6285 u64 count = nfsd_find_all_delegations(clp, 0, NULL); 6286 6287 nfsd_print_count(clp, count, "delegations"); 6288 return count; 6289 } 6290 6291 u64 6292 nfsd_inject_print_delegations(void) 6293 { 6294 struct nfs4_client *clp; 6295 u64 count = 0; 6296 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6297 nfsd_net_id); 6298 6299 if (!nfsd_netns_ready(nn)) 6300 return 0; 6301 6302 spin_lock(&nn->client_lock); 6303 list_for_each_entry(clp, &nn->client_lru, cl_lru) 6304 count += nfsd_print_client_delegations(clp); 6305 spin_unlock(&nn->client_lock); 6306 6307 return count; 6308 } 6309 6310 static void 6311 nfsd_forget_delegations(struct list_head *reaplist) 6312 { 6313 struct nfs4_client *clp; 6314 struct nfs4_delegation *dp, *next; 6315 6316 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { 6317 list_del_init(&dp->dl_recall_lru); 6318 clp = dp->dl_stid.sc_client; 6319 revoke_delegation(dp); 6320 put_client(clp); 6321 } 6322 } 6323 6324 u64 6325 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr, 6326 size_t addr_size) 6327 { 6328 u64 count = 0; 6329 struct nfs4_client *clp; 6330 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6331 nfsd_net_id); 6332 LIST_HEAD(reaplist); 6333 6334 if (!nfsd_netns_ready(nn)) 6335 return count; 6336 6337 spin_lock(&nn->client_lock); 6338 clp = nfsd_find_client(addr, addr_size); 6339 if (clp) 6340 count = nfsd_find_all_delegations(clp, 0, &reaplist); 6341 spin_unlock(&nn->client_lock); 6342 6343 nfsd_forget_delegations(&reaplist); 6344 return count; 6345 } 6346 6347 u64 6348 nfsd_inject_forget_delegations(u64 max) 6349 { 6350 u64 count = 0; 6351 struct nfs4_client *clp; 6352 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6353 nfsd_net_id); 6354 LIST_HEAD(reaplist); 6355 6356 if (!nfsd_netns_ready(nn)) 6357 return count; 6358 6359 spin_lock(&nn->client_lock); 6360 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6361 count += nfsd_find_all_delegations(clp, max - count, &reaplist); 6362 if (max != 0 && count >= max) 6363 break; 6364 } 6365 spin_unlock(&nn->client_lock); 6366 nfsd_forget_delegations(&reaplist); 6367 return count; 6368 } 6369 6370 static void 6371 nfsd_recall_delegations(struct list_head *reaplist) 6372 { 6373 struct nfs4_client *clp; 6374 struct nfs4_delegation *dp, *next; 6375 6376 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { 6377 list_del_init(&dp->dl_recall_lru); 6378 clp = dp->dl_stid.sc_client; 6379 /* 6380 * We skipped all entries that had a zero dl_time before, 6381 * so we can now reset the dl_time back to 0. If a delegation 6382 * break comes in now, then it won't make any difference since 6383 * we're recalling it either way. 6384 */ 6385 spin_lock(&state_lock); 6386 dp->dl_time = 0; 6387 spin_unlock(&state_lock); 6388 nfsd_break_one_deleg(dp); 6389 put_client(clp); 6390 } 6391 } 6392 6393 u64 6394 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr, 6395 size_t addr_size) 6396 { 6397 u64 count = 0; 6398 struct nfs4_client *clp; 6399 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6400 nfsd_net_id); 6401 LIST_HEAD(reaplist); 6402 6403 if (!nfsd_netns_ready(nn)) 6404 return count; 6405 6406 spin_lock(&nn->client_lock); 6407 clp = nfsd_find_client(addr, addr_size); 6408 if (clp) 6409 count = nfsd_find_all_delegations(clp, 0, &reaplist); 6410 spin_unlock(&nn->client_lock); 6411 6412 nfsd_recall_delegations(&reaplist); 6413 return count; 6414 } 6415 6416 u64 6417 nfsd_inject_recall_delegations(u64 max) 6418 { 6419 u64 count = 0; 6420 struct nfs4_client *clp, *next; 6421 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, 6422 nfsd_net_id); 6423 LIST_HEAD(reaplist); 6424 6425 if (!nfsd_netns_ready(nn)) 6426 return count; 6427 6428 spin_lock(&nn->client_lock); 6429 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { 6430 count += nfsd_find_all_delegations(clp, max - count, &reaplist); 6431 if (max != 0 && ++count >= max) 6432 break; 6433 } 6434 spin_unlock(&nn->client_lock); 6435 nfsd_recall_delegations(&reaplist); 6436 return count; 6437 } 6438 #endif /* CONFIG_NFSD_FAULT_INJECTION */ 6439 6440 /* 6441 * Since the lifetime of a delegation isn't limited to that of an open, a 6442 * client may quite reasonably hang on to a delegation as long as it has 6443 * the inode cached. This becomes an obvious problem the first time a 6444 * client's inode cache approaches the size of the server's total memory. 6445 * 6446 * For now we avoid this problem by imposing a hard limit on the number 6447 * of delegations, which varies according to the server's memory size. 6448 */ 6449 static void 6450 set_max_delegations(void) 6451 { 6452 /* 6453 * Allow at most 4 delegations per megabyte of RAM. Quick 6454 * estimates suggest that in the worst case (where every delegation 6455 * is for a different inode), a delegation could take about 1.5K, 6456 * giving a worst case usage of about 6% of memory. 6457 */ 6458 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 6459 } 6460 6461 static int nfs4_state_create_net(struct net *net) 6462 { 6463 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6464 int i; 6465 6466 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) * 6467 CLIENT_HASH_SIZE, GFP_KERNEL); 6468 if (!nn->conf_id_hashtbl) 6469 goto err; 6470 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) * 6471 CLIENT_HASH_SIZE, GFP_KERNEL); 6472 if (!nn->unconf_id_hashtbl) 6473 goto err_unconf_id; 6474 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * 6475 SESSION_HASH_SIZE, GFP_KERNEL); 6476 if (!nn->sessionid_hashtbl) 6477 goto err_sessionid; 6478 6479 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6480 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 6481 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 6482 } 6483 for (i = 0; i < SESSION_HASH_SIZE; i++) 6484 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 6485 nn->conf_name_tree = RB_ROOT; 6486 nn->unconf_name_tree = RB_ROOT; 6487 INIT_LIST_HEAD(&nn->client_lru); 6488 INIT_LIST_HEAD(&nn->close_lru); 6489 INIT_LIST_HEAD(&nn->del_recall_lru); 6490 spin_lock_init(&nn->client_lock); 6491 6492 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 6493 get_net(net); 6494 6495 return 0; 6496 6497 err_sessionid: 6498 kfree(nn->unconf_id_hashtbl); 6499 err_unconf_id: 6500 kfree(nn->conf_id_hashtbl); 6501 err: 6502 return -ENOMEM; 6503 } 6504 6505 static void 6506 nfs4_state_destroy_net(struct net *net) 6507 { 6508 int i; 6509 struct nfs4_client *clp = NULL; 6510 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6511 6512 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6513 while (!list_empty(&nn->conf_id_hashtbl[i])) { 6514 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 6515 destroy_client(clp); 6516 } 6517 } 6518 6519 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 6520 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 6521 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 6522 destroy_client(clp); 6523 } 6524 } 6525 6526 kfree(nn->sessionid_hashtbl); 6527 kfree(nn->unconf_id_hashtbl); 6528 kfree(nn->conf_id_hashtbl); 6529 put_net(net); 6530 } 6531 6532 int 6533 nfs4_state_start_net(struct net *net) 6534 { 6535 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6536 int ret; 6537 6538 ret = nfs4_state_create_net(net); 6539 if (ret) 6540 return ret; 6541 nn->boot_time = get_seconds(); 6542 nn->grace_ended = false; 6543 locks_start_grace(net, &nn->nfsd4_manager); 6544 nfsd4_client_tracking_init(net); 6545 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", 6546 nn->nfsd4_grace, net); 6547 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 6548 return 0; 6549 } 6550 6551 /* initialization to perform when the nfsd service is started: */ 6552 6553 int 6554 nfs4_state_start(void) 6555 { 6556 int ret; 6557 6558 ret = set_callback_cred(); 6559 if (ret) 6560 return -ENOMEM; 6561 laundry_wq = create_singlethread_workqueue("nfsd4"); 6562 if (laundry_wq == NULL) { 6563 ret = -ENOMEM; 6564 goto out_recovery; 6565 } 6566 ret = nfsd4_create_callback_queue(); 6567 if (ret) 6568 goto out_free_laundry; 6569 6570 set_max_delegations(); 6571 6572 return 0; 6573 6574 out_free_laundry: 6575 destroy_workqueue(laundry_wq); 6576 out_recovery: 6577 return ret; 6578 } 6579 6580 void 6581 nfs4_state_shutdown_net(struct net *net) 6582 { 6583 struct nfs4_delegation *dp = NULL; 6584 struct list_head *pos, *next, reaplist; 6585 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6586 6587 cancel_delayed_work_sync(&nn->laundromat_work); 6588 locks_end_grace(&nn->nfsd4_manager); 6589 6590 INIT_LIST_HEAD(&reaplist); 6591 spin_lock(&state_lock); 6592 list_for_each_safe(pos, next, &nn->del_recall_lru) { 6593 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6594 unhash_delegation_locked(dp); 6595 list_add(&dp->dl_recall_lru, &reaplist); 6596 } 6597 spin_unlock(&state_lock); 6598 list_for_each_safe(pos, next, &reaplist) { 6599 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6600 list_del_init(&dp->dl_recall_lru); 6601 put_clnt_odstate(dp->dl_clnt_odstate); 6602 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 6603 nfs4_put_stid(&dp->dl_stid); 6604 } 6605 6606 nfsd4_client_tracking_exit(net); 6607 nfs4_state_destroy_net(net); 6608 } 6609 6610 void 6611 nfs4_state_shutdown(void) 6612 { 6613 destroy_workqueue(laundry_wq); 6614 nfsd4_destroy_callback_queue(); 6615 } 6616 6617 static void 6618 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 6619 { 6620 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) 6621 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 6622 } 6623 6624 static void 6625 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 6626 { 6627 if (cstate->minorversion) { 6628 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 6629 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 6630 } 6631 } 6632 6633 void 6634 clear_current_stateid(struct nfsd4_compound_state *cstate) 6635 { 6636 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 6637 } 6638 6639 /* 6640 * functions to set current state id 6641 */ 6642 void 6643 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 6644 { 6645 put_stateid(cstate, &odp->od_stateid); 6646 } 6647 6648 void 6649 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) 6650 { 6651 put_stateid(cstate, &open->op_stateid); 6652 } 6653 6654 void 6655 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 6656 { 6657 put_stateid(cstate, &close->cl_stateid); 6658 } 6659 6660 void 6661 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) 6662 { 6663 put_stateid(cstate, &lock->lk_resp_stateid); 6664 } 6665 6666 /* 6667 * functions to consume current state id 6668 */ 6669 6670 void 6671 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 6672 { 6673 get_stateid(cstate, &odp->od_stateid); 6674 } 6675 6676 void 6677 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp) 6678 { 6679 get_stateid(cstate, &drp->dr_stateid); 6680 } 6681 6682 void 6683 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp) 6684 { 6685 get_stateid(cstate, &fsp->fr_stateid); 6686 } 6687 6688 void 6689 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) 6690 { 6691 get_stateid(cstate, &setattr->sa_stateid); 6692 } 6693 6694 void 6695 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 6696 { 6697 get_stateid(cstate, &close->cl_stateid); 6698 } 6699 6700 void 6701 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku) 6702 { 6703 get_stateid(cstate, &locku->lu_stateid); 6704 } 6705 6706 void 6707 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read) 6708 { 6709 get_stateid(cstate, &read->rd_stateid); 6710 } 6711 6712 void 6713 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write) 6714 { 6715 get_stateid(cstate, &write->wr_stateid); 6716 } 6717