/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))

/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);

/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

static void free_session(struct nfsd4_session *);

void nfsd4_put_session(struct nfsd4_session *ses)
{
	atomic_dec(&ses->se_ref);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses)
{
	if (atomic_read(&ses->se_ref))
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	if (is_session_dead(ses))
		return nfserr_badsession;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	clp->cl_time = 0;
	return nfs_ok;
}

static __be32 mark_client_expired(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	__be32 ret;

	spin_lock(&nn->client_lock);
	ret = mark_client_expired_locked(clp);
	spin_unlock(&nn->client_lock);
	return ret;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static inline void
renew_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

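/*
 * Editor's illustration (not in the original source): callers are
 * expected to pair these as, roughly,
 *
 *	spin_lock(&nn->client_lock);
 *	status = get_client_locked(clp);	// nfserr_expired if gone
 *	spin_unlock(&nn->client_lock);
 *	if (status == nfs_ok) {
 *		// ... use clp ...
 *		put_client_renew(clp);		// last put renews cl_time
 *	}
 *
 * so a pinned client cannot expire, and activity keeps it at the tail
 * of the LRU.
 */
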
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}

static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
		hlist_del(&fi->fi_hash);
		spin_unlock(&recall_lock);
		iput(fi->fi_inode);
		nfsd4_free_file(fi);
	}
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static int num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	ret += clientid;
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct inode *ino)
{
	/* XXX: why are we hashing on inode pointer, anyway? */
	return hash_ptr(ino, FILE_HASH_BITS);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
	atomic_inc(&fp->fi_access[oflag]);
}

static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_get_access(fp, O_RDONLY);
		__nfs4_file_get_access(fp, O_WRONLY);
	} else
		__nfs4_file_get_access(fp, oflag);
}

static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
{
	if (fp->fi_fds[oflag]) {
		fput(fp->fi_fds[oflag]);
		fp->fi_fds[oflag] = NULL;
	}
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (atomic_dec_and_test(&fp->fi_access[oflag])) {
		nfs4_file_put_fd(fp, oflag);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			nfs4_file_put_fd(fp, O_RDWR);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_put_access(fp, O_RDONLY);
		__nfs4_file_put_access(fp, O_WRONLY);
	} else
		__nfs4_file_put_access(fp, oflag);
}

static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
{
	struct idr *stateids = &cl->cl_stateids;
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL);
	if (new_id < 0)
		goto out_free;
	stid->sc_client = cl;
	stid->sc_type = 0;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	stid->sc_stateid.si_generation = 0;

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

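/*
 * Editor's illustration (not in the original source): the so_id chosen
 * above is the lookup key for the stateid's whole lifetime --
 *
 *	nfs4_alloc_stid()	idr_alloc_cyclic() -> so_id
 *	find_stateid()		idr_find(&cl->cl_stateids, so_id)
 *	remove_stid()		idr_remove(&cl->cl_stateids, so_id)
 *
 * -- and cyclic allocation makes successive ids "increase" (mod
 * INT_MAX), so a stale NFSv4.0 retransmission is unlikely to name a
 * recently recycled id.
 */
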
static struct nfs4_ol_stateid *nfs4_alloc_stateid(struct nfs4_client *clp)
{
	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
	struct nfs4_delegation *dp;
	struct nfs4_file *fp = stp->st_file;

	dprintk("NFSD alloc_init_deleg\n");
	/*
	 * Major work on the lease subsystem (for example, to support
	 * callbacks on stat) will be required before we can support
	 * write delegations properly.
	 */
	if (type != NFS4_OPEN_DELEGATE_READ)
		return NULL;
	if (fp->fi_had_conflict)
		return NULL;
	if (num_delegations > max_delegations)
		return NULL;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	if (dp == NULL)
		return dp;
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	get_nfs4_file(fp);
	dp->dl_file = fp;
	dp->dl_type = type;
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	nfsd4_init_callback(&dp->dl_recall);
	return dp;
}

static void remove_stid(struct nfs4_stid *s)
{
	struct idr *stateids = &s->sc_client->cl_stateids;

	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}

void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		kmem_cache_free(deleg_slab, dp);
		num_delegations--;
	}
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;
	}
}

static void unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	put_nfs4_file(dp->dl_file);
	dp->dl_file = NULL;
}

static void destroy_revoked_delegation(struct nfs4_delegation *dp)
{
	list_del_init(&dp->dl_recall_lru);
	remove_stid(&dp->dl_stid);
	nfs4_put_delegation(dp);
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	unhash_delegation(dp);
	remove_stid(&dp->dl_stid);
	nfs4_put_delegation(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	if (clp->cl_minorversion == 0)
		destroy_delegation(dp);
	else {
		unhash_delegation(dp);
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
	}
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */

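/*
 * Worked example (editor's addition, not in the original): the share
 * access values are NFS4_SHARE_ACCESS_READ = 1, _WRITE = 2, _BOTH = 3,
 * and each value ever granted is recorded as a bit position.  After
 *
 *	OPEN allow read		__set_bit(1, &st_access_bmap)
 *	OPEN allow both		__set_bit(3, &st_access_bmap)
 *
 * st_access_bmap is 0b1010 and bmap_to_share_mode() below returns
 * 1 | 3 = NFS4_SHARE_ACCESS_BOTH: the union of all combinations used.
 */
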
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

static bool
test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	unsigned int access, deny;

	access = bmap_to_share_mode(stp->st_access_bmap);
	deny = bmap_to_share_mode(stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))
		return false;
	return true;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_access_bmap);
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_access_bmap);
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_access_bmap);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_deny_bmap);
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_deny_bmap);
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_deny_bmap);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_file,
					     nfs4_access_to_omode(i));
		clear_access(i, stp);
	}
}

static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);
}

static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	release_all_access(stp);
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;
}

static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	remove_stid(&stp->st_stid);
	kmem_cache_free(stateid_slab, stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file = find_any_file(stp->st_file);
	if (file)
		locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);
}

static void unhash_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	list_del(&lo->lo_perstateid);
	list_del(&lo->lo_owner_ino_hash);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_lock_stateid(stp);
	}
}

static void release_lockowner(struct nfs4_lockowner *lo)
{
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);
}

static void
release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_lockowner *lo;

	while (!list_empty(&open_stp->st_lockowners)) {
		lo = list_entry(open_stp->st_lockowners.next,
				struct nfs4_lockowner, lo_perstateid);
		release_lockowner(lo);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_stateid_lockowners(stp);
	close_generic_stateid(stp);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
	unhash_stid(&stp->st_stid);
	free_generic_stateid(stp);
}

static void unhash_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

	if (s) {
		unhash_stid(&s->st_stid);
		free_generic_stateid(s);
		oo->oo_last_closed_stid = NULL;
	}
}

static void release_openowner(struct nfs4_openowner *oo)
{
	unhash_openowner(oo);
	list_del(&oo->oo_close_lru);
	release_last_closed_stateid(oo);
	nfs4_free_openowner(oo);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		cstate->replay_owner = NULL;
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

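/*
 * Editor's note (illustration only): the 16-byte sessionid generated
 * above is four u32s,
 *
 *	[ cl_boot | cl_id | sequence | reserved (0) ]
 *
 * which is exactly the %u:%u:%u:%u that dump_sessionid() prints, and
 * sequence is what hash_sessionid() buckets on.
 */
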
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

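/*
 * Worked example (editor's addition): a client that negotiates the
 * largest cache we allow, maxresp_cached = NFSD_SLOT_CACHE_SIZE +
 * NFSD_MIN_HDR_SEQ_SZ (see check_forechannel_attrs() below), pays
 *
 *	slot_bytes = NFSD_SLOT_CACHE_SIZE + sizeof(struct nfsd4_slot)
 *
 * per slot, while one with maxresp_cached below NFSD_MIN_HDR_SEQ_SZ
 * caches nothing and pays only sizeof(struct nfsd4_slot).
 */
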
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
{
	int numslots = attrs->maxreqs;
	int slotsize = slot_bytes(attrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	if (conn->cn_flags & NFS4_CDFC4_BACK) {
		/* callback channel may be back up */
		nfsd4_probe_callback(ses->se_client);
	}
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	spin_lock(&nn->client_lock);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&nn->client_lock);
	memcpy(&new->se_fchannel, &cses->fore_channel,
			sizeof(struct nfsd4_channel_attrs));
	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	if (clid->cl_boot == nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL) {
		kfree(clp);
		return NULL;
	}
	clp->cl_name.len = name.len;
	return clp;
}

static inline void
free_client(struct nfs4_client *clp)
{
	struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}

/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		destroy_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
	else
		rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
	free_client(clp);
	spin_unlock(&nn->client_lock);
}

static void expire_client(struct nfs4_client *clp)
{
	nfsd4_client_record_remove(clp);
	destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	return 0;
}

static long long
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	long long res;

	res = o1->len - o2->len;
	if (res)
		return res;
	return (long long)memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use the uid, gids, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = current_clientid++;
}

static void gen_confirm(struct nfs4_client *clp)
{
	__be32 verf[2];
	static u32 i;

	verf[0] = (__be32)get_seconds();
	verf[1] = (__be32)i++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	s = find_stateid(cl, t);
	if (!s)
		return NULL;
	if (typemask & s->sc_type)
		return s;
	return NULL;
}

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	INIT_LIST_HEAD(&clp->cl_sessions);
	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		spin_lock(&nn->client_lock);
		free_client(clp);
		spin_unlock(&nn->client_lock);
		return NULL;
	}
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	INIT_LIST_HEAD(&clp->cl_revoked);
	spin_lock_init(&clp->cl_lock);
	nfsd4_init_callback(&clp->cl_cb_null);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	long long cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

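/*
 * Editor's illustration for gen_callback() below (not in the original):
 * the client names its callback target as a netid plus universal
 * address, e.g. netid "tcp" with uaddr "192.0.2.1.8.1", which
 * rpc_uaddr2sockaddr() parses as IPv4 192.0.2.1, port 8 * 256 + 1 =
 * 2049.  A netid/address family mismatch (say "tcp6" with an IPv4
 * uaddr) takes the out_err path and the client simply gets no
 * delegations.
 */
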
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk("NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
}

/*
 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
 */
void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
	base = (char *)resp->cstate.datap -
					(char *)resp->xbuf->head[0].iov_base;
	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
				    slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

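/*
 * Editor's sketch of how the pieces above fit together (not in the
 * original; the SEQUENCE op itself is handled elsewhere): on a
 * retransmission, check_slot_seqid() reports nfserr_replay_cache and
 * the reply is rebuilt as
 *
 *	nfsd4_replay_cache_entry()
 *		-> nfsd4_enc_sequence_replay()	re-encode SEQUENCE
 *		-> memcpy of slot->sl_data	replay the cached ops
 *
 * while on the normal path nfsd4_store_cache_entry() refreshes the
 * slot after the compound is processed.
 */
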
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateid's.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
out_new:
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	new->cl_minorversion = 1;

	gen_clid(new, nn);
	add_to_unconfirmed(new);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
}

static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

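/*
 * Editor's summary of check_slot_seqid() above (illustration only):
 *
 *	slot in use?	seqid vs. slot_seqid	result
 *	yes		equal			nfserr_jukebox (in progress)
 *	yes		anything else		nfserr_seq_misordered
 *	no		slot_seqid + 1		nfs_ok (the next request)
 *	no		equal			nfserr_replay_cache
 *	no		anything else		nfserr_seq_misordered
 */
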
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFS4_dec_cb_recall_sz + RPC_MAX_REPHEADER_WITH_AUTH)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		return status;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		struct nfs4_client *old;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired(old);
			if (status)
				goto out_free_conn;
			expire_client(old);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	nfs4_unlock_state();
	return status;
out_free_conn:
	nfs4_unlock_state();
	free_conn(conn);
out_free_session:
	__free_session(new);
out_release_drc_mem:
nfsd4_put_drc_mem(&cr_ses->fore_channel);
1939 	return status;
1940 }
1941 
1942 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1943 {
1944 	switch (*dir) {
1945 	case NFS4_CDFC4_FORE:
1946 	case NFS4_CDFC4_BACK:
1947 		return nfs_ok;
1948 	case NFS4_CDFC4_FORE_OR_BOTH:
1949 	case NFS4_CDFC4_BACK_OR_BOTH:
1950 		*dir = NFS4_CDFC4_BOTH;
1951 		return nfs_ok;
1952 	}
1953 	return nfserr_inval;
1954 }
1955 
1956 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
1957 {
1958 	struct nfsd4_session *session = cstate->session;
1959 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1960 
1961 	spin_lock(&nn->client_lock);
1962 	session->se_cb_prog = bc->bc_cb_program;
1963 	session->se_cb_sec = bc->bc_cb_sec;
1964 	spin_unlock(&nn->client_lock);
1965 
1966 	nfsd4_probe_callback(session->se_client);
1967 
1968 	return nfs_ok;
1969 }
1970 
1971 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
1972 		     struct nfsd4_compound_state *cstate,
1973 		     struct nfsd4_bind_conn_to_session *bcts)
1974 {
1975 	__be32 status;
1976 	struct nfsd4_conn *conn;
1977 	struct nfsd4_session *session;
1978 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1979 
1980 	if (!nfsd4_last_compound_op(rqstp))
1981 		return nfserr_not_only_op;
1982 	nfs4_lock_state();
1983 	spin_lock(&nn->client_lock);
1984 	session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
1985 	spin_unlock(&nn->client_lock);
1986 	status = nfserr_badsession;
1987 	if (!session)
1988 		goto out;
1989 	status = nfsd4_map_bcts_dir(&bcts->dir);
1990 	if (status)
1991 		goto out;
1992 	conn = alloc_conn(rqstp, bcts->dir);
1993 	status = nfserr_jukebox;
1994 	if (!conn)
1995 		goto out;
1996 	nfsd4_init_conn(rqstp, conn, session);
1997 	status = nfs_ok;
1998 out:
1999 	nfs4_unlock_state();
2000 	return status;
2001 }
2002 
2003 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2004 {
2005 	if (!session)
2006 		return false;
2007 	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2008 }
2009 
2010 __be32
2011 nfsd4_destroy_session(struct svc_rqst *r,
2012 		      struct nfsd4_compound_state *cstate,
2013 		      struct nfsd4_destroy_session *sessionid)
2014 {
2015 	struct nfsd4_session *ses;
2016 	__be32 status;
2017 	struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id);
2018 
2019 	nfs4_lock_state();
2020 	status = nfserr_not_only_op;
2021 	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2022 		if (!nfsd4_last_compound_op(r))
2023 			goto out;
2024 	}
2025 	dump_sessionid(__func__, &sessionid->sessionid);
2026 	spin_lock(&nn->client_lock);
2027 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
2028 	status = nfserr_badsession;
2029 	if (!ses)
2030 		goto out_client_lock;
2031 	status = mark_session_dead_locked(ses);
2032 	if (status)
2033 		goto out_client_lock;
2034 	unhash_session(ses);
2035 	spin_unlock(&nn->client_lock);
2036 
2037 	nfsd4_probe_callback_sync(ses->se_client);
2038 
2039 	spin_lock(&nn->client_lock);
2040 	free_session(ses);
2041 	status = nfs_ok;
2042 out_client_lock:
2043 	spin_unlock(&nn->client_lock);
2044 out:
2045 	nfs4_unlock_state();
2046 	return status;
2047 }
2048 
2049 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2050 {
2051 	struct nfsd4_conn *c;
2052 
2053 	list_for_each_entry(c, &s->se_conns, cn_persession) {
2054 		if (c->cn_xprt == xpt) {
2055 			return c;
2056 		}
2057 	}
2058 	return NULL;
2059 }
2060 
2061 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session
*ses) 2062 { 2063 struct nfs4_client *clp = ses->se_client; 2064 struct nfsd4_conn *c; 2065 int ret; 2066 2067 spin_lock(&clp->cl_lock); 2068 c = __nfsd4_find_conn(new->cn_xprt, ses); 2069 if (c) { 2070 spin_unlock(&clp->cl_lock); 2071 free_conn(new); 2072 return; 2073 } 2074 __nfsd4_hash_conn(new, ses); 2075 spin_unlock(&clp->cl_lock); 2076 ret = nfsd4_register_conn(new); 2077 if (ret) 2078 /* oops; xprt is already down: */ 2079 nfsd4_conn_lost(&new->cn_xpt_user); 2080 return; 2081 } 2082 2083 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 2084 { 2085 struct nfsd4_compoundargs *args = rqstp->rq_argp; 2086 2087 return args->opcnt > session->se_fchannel.maxops; 2088 } 2089 2090 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 2091 struct nfsd4_session *session) 2092 { 2093 struct xdr_buf *xb = &rqstp->rq_arg; 2094 2095 return xb->len > session->se_fchannel.maxreq_sz; 2096 } 2097 2098 __be32 2099 nfsd4_sequence(struct svc_rqst *rqstp, 2100 struct nfsd4_compound_state *cstate, 2101 struct nfsd4_sequence *seq) 2102 { 2103 struct nfsd4_compoundres *resp = rqstp->rq_resp; 2104 struct nfsd4_session *session; 2105 struct nfs4_client *clp; 2106 struct nfsd4_slot *slot; 2107 struct nfsd4_conn *conn; 2108 __be32 status; 2109 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2110 2111 if (resp->opcnt != 1) 2112 return nfserr_sequence_pos; 2113 2114 /* 2115 * Will be either used or freed by nfsd4_sequence_check_conn 2116 * below. 2117 */ 2118 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 2119 if (!conn) 2120 return nfserr_jukebox; 2121 2122 spin_lock(&nn->client_lock); 2123 status = nfserr_badsession; 2124 session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp)); 2125 if (!session) 2126 goto out_no_session; 2127 clp = session->se_client; 2128 status = get_client_locked(clp); 2129 if (status) 2130 goto out_no_session; 2131 status = nfsd4_get_session_locked(session); 2132 if (status) 2133 goto out_put_client; 2134 2135 status = nfserr_too_many_ops; 2136 if (nfsd4_session_too_many_ops(rqstp, session)) 2137 goto out_put_session; 2138 2139 status = nfserr_req_too_big; 2140 if (nfsd4_request_too_big(rqstp, session)) 2141 goto out_put_session; 2142 2143 status = nfserr_badslot; 2144 if (seq->slotid >= session->se_fchannel.maxreqs) 2145 goto out_put_session; 2146 2147 slot = session->se_slots[seq->slotid]; 2148 dprintk("%s: slotid %d\n", __func__, seq->slotid); 2149 2150 /* We do not negotiate the number of slots yet, so set the 2151 * maxslots to the session maxreqs which is used to encode 2152 * sr_highest_slotid and the sr_target_slot id to maxslots */ 2153 seq->maxslots = session->se_fchannel.maxreqs; 2154 2155 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 2156 slot->sl_flags & NFSD4_SLOT_INUSE); 2157 if (status == nfserr_replay_cache) { 2158 status = nfserr_seq_misordered; 2159 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 2160 goto out_put_session; 2161 cstate->slot = slot; 2162 cstate->session = session; 2163 /* Return the cached reply status and set cstate->status 2164 * for nfsd4_proc_compound processing */ 2165 status = nfsd4_replay_cache_entry(resp, seq); 2166 cstate->status = nfserr_replay_cache; 2167 goto out; 2168 } 2169 if (status) 2170 goto out_put_session; 2171 2172 nfsd4_sequence_check_conn(conn, session); 2173 conn = NULL; 2174 2175 /* Success! 
bump slot seqid */
2176 	slot->sl_seqid = seq->seqid;
2177 	slot->sl_flags |= NFSD4_SLOT_INUSE;
2178 	if (seq->cachethis)
2179 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2180 	else
2181 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2182 
2183 	cstate->slot = slot;
2184 	cstate->session = session;
2185 
2186 out:
2187 	switch (clp->cl_cb_state) {
2188 	case NFSD4_CB_DOWN:
2189 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2190 		break;
2191 	case NFSD4_CB_FAULT:
2192 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2193 		break;
2194 	default:
2195 		seq->status_flags = 0;
2196 	}
2197 	if (!list_empty(&clp->cl_revoked))
2198 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2199 out_no_session:
2200 	kfree(conn);
2201 	spin_unlock(&nn->client_lock);
2202 	return status;
2203 out_put_session:
2204 	nfsd4_put_session(session);
2205 out_put_client:
2206 	put_client_renew_locked(clp);
2207 	goto out_no_session;
2208 }
2209 
2210 __be32
2211 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2212 {
2213 	struct nfs4_client *conf, *unconf, *clp;
2214 	__be32 status = 0;
2215 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2216 
2217 	nfs4_lock_state();
2218 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2219 	conf = find_confirmed_client(&dc->clientid, true, nn);
2220 	WARN_ON_ONCE(conf && unconf);
2221 
2222 	if (conf) {
2223 		clp = conf;
2224 
2225 		if (client_has_state(conf)) {
2226 			status = nfserr_clientid_busy;
2227 			goto out;
2228 		}
2229 	} else if (unconf)
2230 		clp = unconf;
2231 	else {
2232 		status = nfserr_stale_clientid;
2233 		goto out;
2234 	}
2235 
2236 	expire_client(clp);
2237 out:
2238 	nfs4_unlock_state();
2239 	return status;
2240 }
2241 
2242 __be32
2243 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2244 {
2245 	__be32 status = 0;
2246 
2247 	if (rc->rca_one_fs) {
2248 		if (!cstate->current_fh.fh_dentry)
2249 			return nfserr_nofilehandle;
2250 		/*
2251 		 * We don't take advantage of the rca_one_fs case.
2252 		 * That's OK, it's optional, we can safely ignore it.
2253 		 */
2254 		 return nfs_ok;
2255 	}
2256 
2257 	nfs4_lock_state();
2258 	status = nfserr_complete_already;
2259 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2260 			     &cstate->session->se_client->cl_flags))
2261 		goto out;
2262 
2263 	status = nfserr_stale_clientid;
2264 	if (is_client_expired(cstate->session->se_client))
2265 		/*
2266 		 * The following error isn't really legal.
2267 		 * But we only get here if the client has just explicitly
2268 		 * destroyed itself, so surely it no longer cares what
2269 		 * error it gets back on an operation for its dead
2270 		 * client.
2271 */ 2272 goto out; 2273 2274 status = nfs_ok; 2275 nfsd4_client_record_create(cstate->session->se_client); 2276 out: 2277 nfs4_unlock_state(); 2278 return status; 2279 } 2280 2281 __be32 2282 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2283 struct nfsd4_setclientid *setclid) 2284 { 2285 struct xdr_netobj clname = setclid->se_name; 2286 nfs4_verifier clverifier = setclid->se_verf; 2287 struct nfs4_client *conf, *unconf, *new; 2288 __be32 status; 2289 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2290 2291 /* Cases below refer to rfc 3530 section 14.2.33: */ 2292 nfs4_lock_state(); 2293 conf = find_confirmed_client_by_name(&clname, nn); 2294 if (conf) { 2295 /* case 0: */ 2296 status = nfserr_clid_inuse; 2297 if (clp_used_exchangeid(conf)) 2298 goto out; 2299 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 2300 char addr_str[INET6_ADDRSTRLEN]; 2301 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, 2302 sizeof(addr_str)); 2303 dprintk("NFSD: setclientid: string in use by client " 2304 "at %s\n", addr_str); 2305 goto out; 2306 } 2307 } 2308 unconf = find_unconfirmed_client_by_name(&clname, nn); 2309 if (unconf) 2310 expire_client(unconf); 2311 status = nfserr_jukebox; 2312 new = create_client(clname, rqstp, &clverifier); 2313 if (new == NULL) 2314 goto out; 2315 if (conf && same_verf(&conf->cl_verifier, &clverifier)) 2316 /* case 1: probable callback update */ 2317 copy_clid(new, conf); 2318 else /* case 4 (new client) or cases 2, 3 (client reboot): */ 2319 gen_clid(new, nn); 2320 new->cl_minorversion = 0; 2321 gen_callback(new, setclid, rqstp); 2322 add_to_unconfirmed(new); 2323 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 2324 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 2325 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 2326 status = nfs_ok; 2327 out: 2328 nfs4_unlock_state(); 2329 return status; 2330 } 2331 2332 2333 __be32 2334 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 2335 struct nfsd4_compound_state *cstate, 2336 struct nfsd4_setclientid_confirm *setclientid_confirm) 2337 { 2338 struct nfs4_client *conf, *unconf; 2339 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 2340 clientid_t * clid = &setclientid_confirm->sc_clientid; 2341 __be32 status; 2342 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2343 2344 if (STALE_CLIENTID(clid, nn)) 2345 return nfserr_stale_clientid; 2346 nfs4_lock_state(); 2347 2348 conf = find_confirmed_client(clid, false, nn); 2349 unconf = find_unconfirmed_client(clid, false, nn); 2350 /* 2351 * We try hard to give out unique clientid's, so if we get an 2352 * attempt to confirm the same clientid with a different cred, 2353 * there's a bug somewhere. Let's charitably assume it's our 2354 * bug. 2355 */ 2356 status = nfserr_serverfault; 2357 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) 2358 goto out; 2359 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) 2360 goto out; 2361 /* cases below refer to rfc 3530 section 14.2.34: */ 2362 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 2363 if (conf && !unconf) /* case 2: probable retransmit */ 2364 status = nfs_ok; 2365 else /* case 4: client hasn't noticed we rebooted yet? 
*/ 2366 status = nfserr_stale_clientid; 2367 goto out; 2368 } 2369 status = nfs_ok; 2370 if (conf) { /* case 1: callback update */ 2371 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 2372 nfsd4_probe_callback(conf); 2373 expire_client(unconf); 2374 } else { /* case 3: normal case; new or rebooted client */ 2375 conf = find_confirmed_client_by_name(&unconf->cl_name, nn); 2376 if (conf) { 2377 status = mark_client_expired(conf); 2378 if (status) 2379 goto out; 2380 expire_client(conf); 2381 } 2382 move_to_confirmed(unconf); 2383 nfsd4_probe_callback(unconf); 2384 } 2385 out: 2386 nfs4_unlock_state(); 2387 return status; 2388 } 2389 2390 static struct nfs4_file *nfsd4_alloc_file(void) 2391 { 2392 return kmem_cache_alloc(file_slab, GFP_KERNEL); 2393 } 2394 2395 /* OPEN Share state helper functions */ 2396 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino) 2397 { 2398 unsigned int hashval = file_hashval(ino); 2399 2400 atomic_set(&fp->fi_ref, 1); 2401 INIT_LIST_HEAD(&fp->fi_stateids); 2402 INIT_LIST_HEAD(&fp->fi_delegations); 2403 fp->fi_inode = igrab(ino); 2404 fp->fi_had_conflict = false; 2405 fp->fi_lease = NULL; 2406 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 2407 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 2408 spin_lock(&recall_lock); 2409 hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]); 2410 spin_unlock(&recall_lock); 2411 } 2412 2413 static void 2414 nfsd4_free_slab(struct kmem_cache **slab) 2415 { 2416 if (*slab == NULL) 2417 return; 2418 kmem_cache_destroy(*slab); 2419 *slab = NULL; 2420 } 2421 2422 void 2423 nfsd4_free_slabs(void) 2424 { 2425 nfsd4_free_slab(&openowner_slab); 2426 nfsd4_free_slab(&lockowner_slab); 2427 nfsd4_free_slab(&file_slab); 2428 nfsd4_free_slab(&stateid_slab); 2429 nfsd4_free_slab(&deleg_slab); 2430 } 2431 2432 int 2433 nfsd4_init_slabs(void) 2434 { 2435 openowner_slab = kmem_cache_create("nfsd4_openowners", 2436 sizeof(struct nfs4_openowner), 0, 0, NULL); 2437 if (openowner_slab == NULL) 2438 goto out_nomem; 2439 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 2440 sizeof(struct nfs4_lockowner), 0, 0, NULL); 2441 if (lockowner_slab == NULL) 2442 goto out_nomem; 2443 file_slab = kmem_cache_create("nfsd4_files", 2444 sizeof(struct nfs4_file), 0, 0, NULL); 2445 if (file_slab == NULL) 2446 goto out_nomem; 2447 stateid_slab = kmem_cache_create("nfsd4_stateids", 2448 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 2449 if (stateid_slab == NULL) 2450 goto out_nomem; 2451 deleg_slab = kmem_cache_create("nfsd4_delegations", 2452 sizeof(struct nfs4_delegation), 0, 0, NULL); 2453 if (deleg_slab == NULL) 2454 goto out_nomem; 2455 return 0; 2456 out_nomem: 2457 nfsd4_free_slabs(); 2458 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 2459 return -ENOMEM; 2460 } 2461 2462 void nfs4_free_openowner(struct nfs4_openowner *oo) 2463 { 2464 kfree(oo->oo_owner.so_owner.data); 2465 kmem_cache_free(openowner_slab, oo); 2466 } 2467 2468 void nfs4_free_lockowner(struct nfs4_lockowner *lo) 2469 { 2470 kfree(lo->lo_owner.so_owner.data); 2471 kmem_cache_free(lockowner_slab, lo); 2472 } 2473 2474 static void init_nfs4_replay(struct nfs4_replay *rp) 2475 { 2476 rp->rp_status = nfserr_serverfault; 2477 rp->rp_buflen = 0; 2478 rp->rp_buf = rp->rp_ibuf; 2479 } 2480 2481 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 2482 { 2483 struct nfs4_stateowner *sop; 2484 2485 sop = kmem_cache_alloc(slab, GFP_KERNEL); 2486 if (!sop) 2487 return NULL; 2488 2489 sop->so_owner.data = 
kmemdup(owner->data, owner->len, GFP_KERNEL); 2490 if (!sop->so_owner.data) { 2491 kmem_cache_free(slab, sop); 2492 return NULL; 2493 } 2494 sop->so_owner.len = owner->len; 2495 2496 INIT_LIST_HEAD(&sop->so_stateids); 2497 sop->so_client = clp; 2498 init_nfs4_replay(&sop->so_replay); 2499 return sop; 2500 } 2501 2502 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 2503 { 2504 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2505 2506 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); 2507 list_add(&oo->oo_perclient, &clp->cl_openowners); 2508 } 2509 2510 static struct nfs4_openowner * 2511 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { 2512 struct nfs4_openowner *oo; 2513 2514 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 2515 if (!oo) 2516 return NULL; 2517 oo->oo_owner.so_is_open_owner = 1; 2518 oo->oo_owner.so_seqid = open->op_seqid; 2519 oo->oo_flags = NFS4_OO_NEW; 2520 oo->oo_time = 0; 2521 oo->oo_last_closed_stid = NULL; 2522 INIT_LIST_HEAD(&oo->oo_close_lru); 2523 hash_openowner(oo, clp, strhashval); 2524 return oo; 2525 } 2526 2527 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 2528 struct nfs4_openowner *oo = open->op_openowner; 2529 2530 stp->st_stid.sc_type = NFS4_OPEN_STID; 2531 INIT_LIST_HEAD(&stp->st_lockowners); 2532 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 2533 list_add(&stp->st_perfile, &fp->fi_stateids); 2534 stp->st_stateowner = &oo->oo_owner; 2535 get_nfs4_file(fp); 2536 stp->st_file = fp; 2537 stp->st_access_bmap = 0; 2538 stp->st_deny_bmap = 0; 2539 set_access(open->op_share_access, stp); 2540 set_deny(open->op_share_deny, stp); 2541 stp->st_openstp = NULL; 2542 } 2543 2544 static void 2545 move_to_close_lru(struct nfs4_openowner *oo, struct net *net) 2546 { 2547 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2548 2549 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 2550 2551 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 2552 oo->oo_time = get_seconds(); 2553 } 2554 2555 static int 2556 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, 2557 clientid_t *clid) 2558 { 2559 return (sop->so_owner.len == owner->len) && 2560 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && 2561 (sop->so_client->cl_clientid.cl_id == clid->cl_id); 2562 } 2563 2564 static struct nfs4_openowner * 2565 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, 2566 bool sessions, struct nfsd_net *nn) 2567 { 2568 struct nfs4_stateowner *so; 2569 struct nfs4_openowner *oo; 2570 struct nfs4_client *clp; 2571 2572 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) { 2573 if (!so->so_is_open_owner) 2574 continue; 2575 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) { 2576 oo = openowner(so); 2577 clp = oo->oo_owner.so_client; 2578 if ((bool)clp->cl_minorversion != sessions) 2579 return NULL; 2580 renew_client(oo->oo_owner.so_client); 2581 return oo; 2582 } 2583 } 2584 return NULL; 2585 } 2586 2587 /* search file_hashtbl[] for file */ 2588 static struct nfs4_file * 2589 find_file(struct inode *ino) 2590 { 2591 unsigned int hashval = file_hashval(ino); 2592 struct nfs4_file *fp; 2593 2594 spin_lock(&recall_lock); 2595 hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 2596 if (fp->fi_inode == ino) { 2597 get_nfs4_file(fp); 2598 spin_unlock(&recall_lock); 2599 return 
fp; 2600 } 2601 } 2602 spin_unlock(&recall_lock); 2603 return NULL; 2604 } 2605 2606 /* 2607 * Called to check deny when READ with all zero stateid or 2608 * WRITE with all zero or all one stateid 2609 */ 2610 static __be32 2611 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 2612 { 2613 struct inode *ino = current_fh->fh_dentry->d_inode; 2614 struct nfs4_file *fp; 2615 struct nfs4_ol_stateid *stp; 2616 __be32 ret; 2617 2618 fp = find_file(ino); 2619 if (!fp) 2620 return nfs_ok; 2621 ret = nfserr_locked; 2622 /* Search for conflicting share reservations */ 2623 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 2624 if (test_deny(deny_type, stp) || 2625 test_deny(NFS4_SHARE_DENY_BOTH, stp)) 2626 goto out; 2627 } 2628 ret = nfs_ok; 2629 out: 2630 put_nfs4_file(fp); 2631 return ret; 2632 } 2633 2634 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 2635 { 2636 struct nfs4_client *clp = dp->dl_stid.sc_client; 2637 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2638 2639 /* We're assuming the state code never drops its reference 2640 * without first removing the lease. Since we're in this lease 2641 * callback (and since the lease code is serialized by the kernel 2642 * lock) we know the server hasn't removed the lease yet, we know 2643 * it's safe to take a reference: */ 2644 atomic_inc(&dp->dl_count); 2645 2646 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 2647 2648 /* Only place dl_time is set; protected by i_lock: */ 2649 dp->dl_time = get_seconds(); 2650 2651 nfsd4_cb_recall(dp); 2652 } 2653 2654 /* Called from break_lease() with i_lock held. */ 2655 static void nfsd_break_deleg_cb(struct file_lock *fl) 2656 { 2657 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; 2658 struct nfs4_delegation *dp; 2659 2660 if (!fp) { 2661 WARN(1, "(%p)->fl_owner NULL\n", fl); 2662 return; 2663 } 2664 if (fp->fi_had_conflict) { 2665 WARN(1, "duplicate break on %p\n", fp); 2666 return; 2667 } 2668 /* 2669 * We don't want the locks code to timeout the lease for us; 2670 * we'll remove it ourself if a delegation isn't returned 2671 * in time: 2672 */ 2673 fl->fl_break_time = 0; 2674 2675 spin_lock(&recall_lock); 2676 fp->fi_had_conflict = true; 2677 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 2678 nfsd_break_one_deleg(dp); 2679 spin_unlock(&recall_lock); 2680 } 2681 2682 static 2683 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) 2684 { 2685 if (arg & F_UNLCK) 2686 return lease_modify(onlist, arg); 2687 else 2688 return -EAGAIN; 2689 } 2690 2691 static const struct lock_manager_operations nfsd_lease_mng_ops = { 2692 .lm_break = nfsd_break_deleg_cb, 2693 .lm_change = nfsd_change_deleg_cb, 2694 }; 2695 2696 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 2697 { 2698 if (nfsd4_has_session(cstate)) 2699 return nfs_ok; 2700 if (seqid == so->so_seqid - 1) 2701 return nfserr_replay_me; 2702 if (seqid == so->so_seqid) 2703 return nfs_ok; 2704 return nfserr_bad_seqid; 2705 } 2706 2707 __be32 2708 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 2709 struct nfsd4_open *open, struct nfsd_net *nn) 2710 { 2711 clientid_t *clientid = &open->op_clientid; 2712 struct nfs4_client *clp = NULL; 2713 unsigned int strhashval; 2714 struct nfs4_openowner *oo = NULL; 2715 __be32 status; 2716 2717 if (STALE_CLIENTID(&open->op_clientid, nn)) 2718 return nfserr_stale_clientid; 2719 /* 2720 * In case we need it later, after we've already created the 2721 * file and don't want 
to risk a further failure: 2722 */ 2723 open->op_file = nfsd4_alloc_file(); 2724 if (open->op_file == NULL) 2725 return nfserr_jukebox; 2726 2727 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner); 2728 oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn); 2729 open->op_openowner = oo; 2730 if (!oo) { 2731 clp = find_confirmed_client(clientid, cstate->minorversion, 2732 nn); 2733 if (clp == NULL) 2734 return nfserr_expired; 2735 goto new_owner; 2736 } 2737 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 2738 /* Replace unconfirmed owners without checking for replay. */ 2739 clp = oo->oo_owner.so_client; 2740 release_openowner(oo); 2741 open->op_openowner = NULL; 2742 goto new_owner; 2743 } 2744 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 2745 if (status) 2746 return status; 2747 clp = oo->oo_owner.so_client; 2748 goto alloc_stateid; 2749 new_owner: 2750 oo = alloc_init_open_stateowner(strhashval, clp, open); 2751 if (oo == NULL) 2752 return nfserr_jukebox; 2753 open->op_openowner = oo; 2754 alloc_stateid: 2755 open->op_stp = nfs4_alloc_stateid(clp); 2756 if (!open->op_stp) 2757 return nfserr_jukebox; 2758 return nfs_ok; 2759 } 2760 2761 static inline __be32 2762 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 2763 { 2764 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 2765 return nfserr_openmode; 2766 else 2767 return nfs_ok; 2768 } 2769 2770 static int share_access_to_flags(u32 share_access) 2771 { 2772 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 2773 } 2774 2775 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 2776 { 2777 struct nfs4_stid *ret; 2778 2779 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); 2780 if (!ret) 2781 return NULL; 2782 return delegstateid(ret); 2783 } 2784 2785 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 2786 { 2787 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 2788 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 2789 } 2790 2791 static __be32 2792 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 2793 struct nfs4_delegation **dp) 2794 { 2795 int flags; 2796 __be32 status = nfserr_bad_stateid; 2797 2798 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid); 2799 if (*dp == NULL) 2800 goto out; 2801 flags = share_access_to_flags(open->op_share_access); 2802 status = nfs4_check_delegmode(*dp, flags); 2803 if (status) 2804 *dp = NULL; 2805 out: 2806 if (!nfsd4_is_deleg_cur(open)) 2807 return nfs_ok; 2808 if (status) 2809 return status; 2810 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 2811 return nfs_ok; 2812 } 2813 2814 static __be32 2815 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp) 2816 { 2817 struct nfs4_ol_stateid *local; 2818 struct nfs4_openowner *oo = open->op_openowner; 2819 2820 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 2821 /* ignore lock owners */ 2822 if (local->st_stateowner->so_is_open_owner == 0) 2823 continue; 2824 /* remember if we have seen this open owner */ 2825 if (local->st_stateowner == &oo->oo_owner) 2826 *stpp = local; 2827 /* check for conflicting share reservations */ 2828 if (!test_share(local, open)) 2829 return nfserr_share_denied; 2830 } 2831 return nfs_ok; 2832 } 2833 2834 static inline int nfs4_access_to_access(u32 nfs4_access) 2835 { 2836 int flags = 0; 2837 2838 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 2839 flags |= NFSD_MAY_READ; 2840 if (nfs4_access & 
NFS4_SHARE_ACCESS_WRITE) 2841 flags |= NFSD_MAY_WRITE; 2842 return flags; 2843 } 2844 2845 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 2846 struct svc_fh *cur_fh, struct nfsd4_open *open) 2847 { 2848 __be32 status; 2849 int oflag = nfs4_access_to_omode(open->op_share_access); 2850 int access = nfs4_access_to_access(open->op_share_access); 2851 2852 if (!fp->fi_fds[oflag]) { 2853 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, 2854 &fp->fi_fds[oflag]); 2855 if (status) 2856 return status; 2857 } 2858 nfs4_file_get_access(fp, oflag); 2859 2860 return nfs_ok; 2861 } 2862 2863 static inline __be32 2864 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 2865 struct nfsd4_open *open) 2866 { 2867 struct iattr iattr = { 2868 .ia_valid = ATTR_SIZE, 2869 .ia_size = 0, 2870 }; 2871 if (!open->op_truncate) 2872 return 0; 2873 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 2874 return nfserr_inval; 2875 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 2876 } 2877 2878 static __be32 2879 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 2880 { 2881 u32 op_share_access = open->op_share_access; 2882 bool new_access; 2883 __be32 status; 2884 2885 new_access = !test_access(op_share_access, stp); 2886 if (new_access) { 2887 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open); 2888 if (status) 2889 return status; 2890 } 2891 status = nfsd4_truncate(rqstp, cur_fh, open); 2892 if (status) { 2893 if (new_access) { 2894 int oflag = nfs4_access_to_omode(op_share_access); 2895 nfs4_file_put_access(fp, oflag); 2896 } 2897 return status; 2898 } 2899 /* remember the open */ 2900 set_access(op_share_access, stp); 2901 set_deny(open->op_share_deny, stp); 2902 2903 return nfs_ok; 2904 } 2905 2906 2907 static void 2908 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) 2909 { 2910 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 2911 } 2912 2913 /* Should we give out recallable state?: */ 2914 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 2915 { 2916 if (clp->cl_cb_state == NFSD4_CB_UP) 2917 return true; 2918 /* 2919 * In the sessions case, since we don't have to establish a 2920 * separate connection for callbacks, we assume it's OK 2921 * until we hear otherwise: 2922 */ 2923 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 2924 } 2925 2926 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) 2927 { 2928 struct file_lock *fl; 2929 2930 fl = locks_alloc_lock(); 2931 if (!fl) 2932 return NULL; 2933 locks_init_lock(fl); 2934 fl->fl_lmops = &nfsd_lease_mng_ops; 2935 fl->fl_flags = FL_LEASE; 2936 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK;
2937 	fl->fl_end = OFFSET_MAX;
2938 	fl->fl_owner = (fl_owner_t)(dp->dl_file);
2939 	fl->fl_pid = current->tgid;
2940 	return fl;
2941 }
2942 
2943 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2944 {
2945 	struct nfs4_file *fp = dp->dl_file;
2946 	struct file_lock *fl;
2947 	int status;
2948 
2949 	fl = nfs4_alloc_init_lease(dp, flag);
2950 	if (!fl)
2951 		return -ENOMEM;
2952 	fl->fl_file = find_readable_file(fp); /* attach the lease to one of the file's readable opens */
2953 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2954 	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2955 	if (status) {
2956 		list_del_init(&dp->dl_perclnt);
2957 		locks_free_lock(fl);
2958 		return status; /* preserve -EAGAIN so a lease conflict can be reported as WND4_CONTENTION */
2959 	}
2960 	fp->fi_lease = fl;
2961 	fp->fi_deleg_file = get_file(fl->fl_file);
2962 	atomic_set(&fp->fi_delegees, 1);
2963 	list_add(&dp->dl_perfile, &fp->fi_delegations);
2964 	return 0;
2965 }
2966 
2967 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2968 {
2969 	struct nfs4_file *fp = dp->dl_file;
2970 
2971 	if (!fp->fi_lease)
2972 		return nfs4_setlease(dp, flag);
2973 	spin_lock(&recall_lock);
2974 	if (fp->fi_had_conflict) {
2975 		spin_unlock(&recall_lock);
2976 		return -EAGAIN;
2977 	}
2978 	atomic_inc(&fp->fi_delegees);
2979 	list_add(&dp->dl_perfile, &fp->fi_delegations);
2980 	spin_unlock(&recall_lock);
2981 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2982 	return 0;
2983 }
2984 
2985 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2986 {
2987 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2988 	if (status == -EAGAIN)
2989 		open->op_why_no_deleg = WND4_CONTENTION;
2990 	else {
2991 		open->op_why_no_deleg = WND4_RESOURCE;
2992 		switch (open->op_deleg_want) {
2993 		case NFS4_SHARE_WANT_READ_DELEG:
2994 		case NFS4_SHARE_WANT_WRITE_DELEG:
2995 		case NFS4_SHARE_WANT_ANY_DELEG:
2996 			break;
2997 		case NFS4_SHARE_WANT_CANCEL:
2998 			open->op_why_no_deleg = WND4_CANCELLED;
2999 			break;
3000 		case NFS4_SHARE_WANT_NO_DELEG:
3001 			WARN_ON_ONCE(1);
3002 		}
3003 	}
3004 }
3005 
3006 /*
3007  * Attempt to hand out a delegation.
3008  */
3009 static void
3010 nfs4_open_delegation(struct net *net, struct svc_fh *fh,
3011 		     struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
3012 {
3013 	struct nfs4_delegation *dp;
3014 	struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
3015 	int cb_up;
3016 	int status = 0, flag = 0;
3017 
3018 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3019 	flag = NFS4_OPEN_DELEGATE_NONE;
3020 	open->op_recall = 0;
3021 	switch (open->op_claim_type) {
3022 	case NFS4_OPEN_CLAIM_PREVIOUS:
3023 		if (!cb_up)
3024 			open->op_recall = 1;
3025 		flag = open->op_delegate_type;
3026 		if (flag == NFS4_OPEN_DELEGATE_NONE)
3027 			goto out;
3028 		break;
3029 	case NFS4_OPEN_CLAIM_NULL:
3030 		/* Let's not give out any delegations till everyone's
3031 		 * had the chance to reclaim theirs....
		 */
3032 		if (locks_in_grace(net))
3033 			goto out;
3034 		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3035 			goto out;
3036 		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3037 			flag = NFS4_OPEN_DELEGATE_WRITE;
3038 		else
3039 			flag = NFS4_OPEN_DELEGATE_READ;
3040 		break;
3041 	default:
3042 		goto out;
3043 	}
3044 
3045 	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
3046 	if (dp == NULL)
3047 		goto out_no_deleg;
3048 	status = nfs4_set_delegation(dp, flag);
3049 	if (status)
3050 		goto out_free;
3051 
3052 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3053 
3054 	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3055 		STATEID_VAL(&dp->dl_stid.sc_stateid));
3056 out:
3057 	/* op_delegate_type still holds the type the client asked to reclaim: */
3058 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3059 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE &&
3060 	    flag == NFS4_OPEN_DELEGATE_NONE)
3061 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3062 	open->op_delegate_type = flag;
3063 	/* 4.1 client asking for a delegation? */
3064 	if (flag == NFS4_OPEN_DELEGATE_NONE && open->op_deleg_want)
3065 		nfsd4_open_deleg_none_ext(open, status);
3066 
3067 	return;
3068 out_free:
3069 	unhash_stid(&dp->dl_stid);
3070 	nfs4_put_delegation(dp);
3071 out_no_deleg:
3072 	flag = NFS4_OPEN_DELEGATE_NONE;
3073 	goto out;
3074 }
3075 
3076 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
3077 					struct nfs4_delegation *dp)
3078 {
3079 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3080 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3081 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3082 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3083 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3084 		   dp->dl_type == NFS4_OPEN_DELEGATE_READ) { /* wants to upgrade a read delegation to write */
3085 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3086 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3087 	}
3088 	/* Otherwise the client must be confused wanting a delegation
3089 	 * it already has, therefore we don't return
3090 	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
3091 	 */
3092 }
3093 
3094 /*
3095  * called with nfs4_lock_state() held.
3096  */
3097 __be32
3098 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3099 {
3100 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3101 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3102 	struct nfs4_file *fp = NULL;
3103 	struct inode *ino = current_fh->fh_dentry->d_inode;
3104 	struct nfs4_ol_stateid *stp = NULL;
3105 	struct nfs4_delegation *dp = NULL;
3106 	__be32 status;
3107 
3108 	/*
3109 	 * Lookup file; if found, lookup stateid and check open request,
3110 	 * and check for delegations in the process of being recalled.
3111 	 * If not found, create the nfs4_file struct
3112 	 */
3113 	fp = find_file(ino);
3114 	if (fp) {
3115 		if ((status = nfs4_check_open(fp, open, &stp)))
3116 			goto out;
3117 		status = nfs4_check_deleg(cl, open, &dp);
3118 		if (status)
3119 			goto out;
3120 	} else {
3121 		status = nfserr_bad_stateid;
3122 		if (nfsd4_is_deleg_cur(open))
3123 			goto out;
3124 		status = nfserr_jukebox;
3125 		fp = open->op_file;
3126 		open->op_file = NULL;
3127 		nfsd4_init_file(fp, ino);
3128 	}
3129 
3130 	/*
3131 	 * OPEN the file, or upgrade an existing OPEN.
3132 	 * If truncate fails, the OPEN fails.
3133 */ 3134 if (stp) { 3135 /* Stateid was found, this is an OPEN upgrade */ 3136 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 3137 if (status) 3138 goto out; 3139 } else { 3140 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open); 3141 if (status) 3142 goto out; 3143 status = nfsd4_truncate(rqstp, current_fh, open); 3144 if (status) 3145 goto out; 3146 stp = open->op_stp; 3147 open->op_stp = NULL; 3148 init_open_stateid(stp, fp, open); 3149 } 3150 update_stateid(&stp->st_stid.sc_stateid); 3151 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3152 3153 if (nfsd4_has_session(&resp->cstate)) { 3154 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 3155 3156 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 3157 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 3158 open->op_why_no_deleg = WND4_NOT_WANTED; 3159 goto nodeleg; 3160 } 3161 } 3162 3163 /* 3164 * Attempt to hand out a delegation. No error return, because the 3165 * OPEN succeeds even if we fail. 3166 */ 3167 nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp); 3168 nodeleg: 3169 status = nfs_ok; 3170 3171 dprintk("%s: stateid=" STATEID_FMT "\n", __func__, 3172 STATEID_VAL(&stp->st_stid.sc_stateid)); 3173 out: 3174 /* 4.1 client trying to upgrade/downgrade delegation? */ 3175 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 3176 open->op_deleg_want) 3177 nfsd4_deleg_xgrade_none_ext(open, dp); 3178 3179 if (fp) 3180 put_nfs4_file(fp); 3181 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 3182 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate)); 3183 /* 3184 * To finish the open response, we just need to set the rflags. 3185 */ 3186 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 3187 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && 3188 !nfsd4_has_session(&resp->cstate)) 3189 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 3190 3191 return status; 3192 } 3193 3194 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status) 3195 { 3196 if (open->op_openowner) { 3197 struct nfs4_openowner *oo = open->op_openowner; 3198 3199 if (!list_empty(&oo->oo_owner.so_stateids)) 3200 list_del_init(&oo->oo_close_lru); 3201 if (oo->oo_flags & NFS4_OO_NEW) { 3202 if (status) { 3203 release_openowner(oo); 3204 open->op_openowner = NULL; 3205 } else 3206 oo->oo_flags &= ~NFS4_OO_NEW; 3207 } 3208 } 3209 if (open->op_file) 3210 nfsd4_free_file(open->op_file); 3211 if (open->op_stp) 3212 free_generic_stateid(open->op_stp); 3213 } 3214 3215 static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp) 3216 { 3217 struct nfs4_client *found; 3218 3219 if (STALE_CLIENTID(clid, nn)) 3220 return nfserr_stale_clientid; 3221 found = find_confirmed_client(clid, session, nn); 3222 if (clp) 3223 *clp = found; 3224 return found ? 
nfs_ok : nfserr_expired; 3225 } 3226 3227 __be32 3228 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3229 clientid_t *clid) 3230 { 3231 struct nfs4_client *clp; 3232 __be32 status; 3233 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3234 3235 nfs4_lock_state(); 3236 dprintk("process_renew(%08x/%08x): starting\n", 3237 clid->cl_boot, clid->cl_id); 3238 status = lookup_clientid(clid, cstate->minorversion, nn, &clp); 3239 if (status) 3240 goto out; 3241 status = nfserr_cb_path_down; 3242 if (!list_empty(&clp->cl_delegations) 3243 && clp->cl_cb_state != NFSD4_CB_UP) 3244 goto out; 3245 status = nfs_ok; 3246 out: 3247 nfs4_unlock_state(); 3248 return status; 3249 } 3250 3251 static void 3252 nfsd4_end_grace(struct nfsd_net *nn) 3253 { 3254 /* do nothing if grace period already ended */ 3255 if (nn->grace_ended) 3256 return; 3257 3258 dprintk("NFSD: end of grace period\n"); 3259 nn->grace_ended = true; 3260 nfsd4_record_grace_done(nn, nn->boot_time); 3261 locks_end_grace(&nn->nfsd4_manager); 3262 /* 3263 * Now that every NFSv4 client has had the chance to recover and 3264 * to see the (possibly new, possibly shorter) lease time, we 3265 * can safely set the next grace time to the current lease time: 3266 */ 3267 nn->nfsd4_grace = nn->nfsd4_lease; 3268 } 3269 3270 static time_t 3271 nfs4_laundromat(struct nfsd_net *nn) 3272 { 3273 struct nfs4_client *clp; 3274 struct nfs4_openowner *oo; 3275 struct nfs4_delegation *dp; 3276 struct list_head *pos, *next, reaplist; 3277 time_t cutoff = get_seconds() - nn->nfsd4_lease; 3278 time_t t, clientid_val = nn->nfsd4_lease; 3279 time_t u, test_val = nn->nfsd4_lease; 3280 3281 nfs4_lock_state(); 3282 3283 dprintk("NFSD: laundromat service - starting\n"); 3284 nfsd4_end_grace(nn); 3285 INIT_LIST_HEAD(&reaplist); 3286 spin_lock(&nn->client_lock); 3287 list_for_each_safe(pos, next, &nn->client_lru) { 3288 clp = list_entry(pos, struct nfs4_client, cl_lru); 3289 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 3290 t = clp->cl_time - cutoff; 3291 if (clientid_val > t) 3292 clientid_val = t; 3293 break; 3294 } 3295 if (mark_client_expired_locked(clp)) { 3296 dprintk("NFSD: client in use (clientid %08x)\n", 3297 clp->cl_clientid.cl_id); 3298 continue; 3299 } 3300 list_move(&clp->cl_lru, &reaplist); 3301 } 3302 spin_unlock(&nn->client_lock); 3303 list_for_each_safe(pos, next, &reaplist) { 3304 clp = list_entry(pos, struct nfs4_client, cl_lru); 3305 dprintk("NFSD: purging unused client (clientid %08x)\n", 3306 clp->cl_clientid.cl_id); 3307 expire_client(clp); 3308 } 3309 spin_lock(&recall_lock); 3310 list_for_each_safe(pos, next, &nn->del_recall_lru) { 3311 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3312 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn) 3313 continue; 3314 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 3315 u = dp->dl_time - cutoff; 3316 if (test_val > u) 3317 test_val = u; 3318 break; 3319 } 3320 list_move(&dp->dl_recall_lru, &reaplist); 3321 } 3322 spin_unlock(&recall_lock); 3323 list_for_each_safe(pos, next, &reaplist) { 3324 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3325 revoke_delegation(dp); 3326 } 3327 test_val = nn->nfsd4_lease; 3328 list_for_each_safe(pos, next, &nn->close_lru) { 3329 oo = container_of(pos, struct nfs4_openowner, oo_close_lru); 3330 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) { 3331 u = oo->oo_time - cutoff; 3332 if (test_val > u) 3333 test_val = u; 3334 break; 3335 
} 3336 release_openowner(oo); 3337 } 3338 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT) 3339 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT; 3340 nfs4_unlock_state(); 3341 return clientid_val; 3342 } 3343 3344 static struct workqueue_struct *laundry_wq; 3345 static void laundromat_main(struct work_struct *); 3346 3347 static void 3348 laundromat_main(struct work_struct *laundry) 3349 { 3350 time_t t; 3351 struct delayed_work *dwork = container_of(laundry, struct delayed_work, 3352 work); 3353 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 3354 laundromat_work); 3355 3356 t = nfs4_laundromat(nn); 3357 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 3358 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 3359 } 3360 3361 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 3362 { 3363 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode) 3364 return nfserr_bad_stateid; 3365 return nfs_ok; 3366 } 3367 3368 static inline int 3369 access_permit_read(struct nfs4_ol_stateid *stp) 3370 { 3371 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 3372 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 3373 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 3374 } 3375 3376 static inline int 3377 access_permit_write(struct nfs4_ol_stateid *stp) 3378 { 3379 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 3380 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 3381 } 3382 3383 static 3384 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 3385 { 3386 __be32 status = nfserr_openmode; 3387 3388 /* For lock stateid's, we test the parent open, not the lock: */ 3389 if (stp->st_openstp) 3390 stp = stp->st_openstp; 3391 if ((flags & WR_STATE) && !access_permit_write(stp)) 3392 goto out; 3393 if ((flags & RD_STATE) && !access_permit_read(stp)) 3394 goto out; 3395 status = nfs_ok; 3396 out: 3397 return status; 3398 } 3399 3400 static inline __be32 3401 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 3402 { 3403 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 3404 return nfs_ok; 3405 else if (locks_in_grace(net)) { 3406 /* Answer in remaining cases depends on existence of 3407 * conflicting state; so we must wait out the grace period. */ 3408 return nfserr_grace; 3409 } else if (flags & WR_STATE) 3410 return nfs4_share_conflict(current_fh, 3411 NFS4_SHARE_DENY_WRITE); 3412 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 3413 return nfs4_share_conflict(current_fh, 3414 NFS4_SHARE_DENY_READ); 3415 } 3416 3417 /* 3418 * Allow READ/WRITE during grace period on recovered state only for files 3419 * that are not able to provide mandatory locking. 3420 */ 3421 static inline int 3422 grace_disallows_io(struct net *net, struct inode *inode) 3423 { 3424 return locks_in_grace(net) && mandatory_lock(inode); 3425 } 3426 3427 /* Returns true iff a is later than b: */ 3428 static bool stateid_generation_after(stateid_t *a, stateid_t *b) 3429 { 3430 return (s32)a->si_generation - (s32)b->si_generation > 0; 3431 } 3432 3433 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 3434 { 3435 /* 3436 * When sessions are used the stateid generation number is ignored 3437 * when it is zero. 
3438 */ 3439 if (has_session && in->si_generation == 0) 3440 return nfs_ok; 3441 3442 if (in->si_generation == ref->si_generation) 3443 return nfs_ok; 3444 3445 /* If the client sends us a stateid from the future, it's buggy: */ 3446 if (stateid_generation_after(in, ref)) 3447 return nfserr_bad_stateid; 3448 /* 3449 * However, we could see a stateid from the past, even from a 3450 * non-buggy client. For example, if the client sends a lock 3451 * while some IO is outstanding, the lock may bump si_generation 3452 * while the IO is still in flight. The client could avoid that 3453 * situation by waiting for responses on all the IO requests, 3454 * but better performance may result in retrying IO that 3455 * receives an old_stateid error if requests are rarely 3456 * reordered in flight: 3457 */ 3458 return nfserr_old_stateid; 3459 } 3460 3461 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 3462 { 3463 struct nfs4_stid *s; 3464 struct nfs4_ol_stateid *ols; 3465 __be32 status; 3466 3467 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3468 return nfserr_bad_stateid; 3469 /* Client debugging aid. */ 3470 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { 3471 char addr_str[INET6_ADDRSTRLEN]; 3472 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str, 3473 sizeof(addr_str)); 3474 pr_warn_ratelimited("NFSD: client %s testing state ID " 3475 "with incorrect client ID\n", addr_str); 3476 return nfserr_bad_stateid; 3477 } 3478 s = find_stateid(cl, stateid); 3479 if (!s) 3480 return nfserr_bad_stateid; 3481 status = check_stateid_generation(stateid, &s->sc_stateid, 1); 3482 if (status) 3483 return status; 3484 switch (s->sc_type) { 3485 case NFS4_DELEG_STID: 3486 return nfs_ok; 3487 case NFS4_REVOKED_DELEG_STID: 3488 return nfserr_deleg_revoked; 3489 case NFS4_OPEN_STID: 3490 case NFS4_LOCK_STID: 3491 ols = openlockstateid(s); 3492 if (ols->st_stateowner->so_is_open_owner 3493 && !(openowner(ols->st_stateowner)->oo_flags 3494 & NFS4_OO_CONFIRMED)) 3495 return nfserr_bad_stateid; 3496 return nfs_ok; 3497 default: 3498 printk("unknown stateid type %x\n", s->sc_type); 3499 case NFS4_CLOSED_STID: 3500 return nfserr_bad_stateid; 3501 } 3502 } 3503 3504 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, 3505 struct nfs4_stid **s, bool sessions, 3506 struct nfsd_net *nn) 3507 { 3508 struct nfs4_client *cl; 3509 __be32 status; 3510 3511 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3512 return nfserr_bad_stateid; 3513 status = lookup_clientid(&stateid->si_opaque.so_clid, sessions, 3514 nn, &cl); 3515 if (status == nfserr_stale_clientid) 3516 return nfserr_stale_stateid; 3517 if (status) 3518 return status; 3519 *s = find_stateid_by_type(cl, stateid, typemask); 3520 if (!*s) 3521 return nfserr_bad_stateid; 3522 return nfs_ok; 3523 } 3524 3525 /* 3526 * Checks for stateid operations 3527 */ 3528 __be32 3529 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, 3530 stateid_t *stateid, int flags, struct file **filpp) 3531 { 3532 struct nfs4_stid *s; 3533 struct nfs4_ol_stateid *stp = NULL; 3534 struct nfs4_delegation *dp = NULL; 3535 struct svc_fh *current_fh = &cstate->current_fh; 3536 struct inode *ino = current_fh->fh_dentry->d_inode; 3537 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3538 __be32 status; 3539 3540 if (filpp) 3541 *filpp = NULL; 3542 3543 if (grace_disallows_io(net, ino)) 3544 return nfserr_grace; 3545 3546 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3547 return 
check_special_stateids(net, current_fh, stateid, flags); 3548 3549 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 3550 &s, cstate->minorversion, nn); 3551 if (status) 3552 return status; 3553 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); 3554 if (status) 3555 goto out; 3556 switch (s->sc_type) { 3557 case NFS4_DELEG_STID: 3558 dp = delegstateid(s); 3559 status = nfs4_check_delegmode(dp, flags); 3560 if (status) 3561 goto out; 3562 if (filpp) { 3563 *filpp = dp->dl_file->fi_deleg_file; 3564 if (!*filpp) { 3565 WARN_ON_ONCE(1); 3566 status = nfserr_serverfault; 3567 goto out; 3568 } 3569 } 3570 break; 3571 case NFS4_OPEN_STID: 3572 case NFS4_LOCK_STID: 3573 stp = openlockstateid(s); 3574 status = nfs4_check_fh(current_fh, stp); 3575 if (status) 3576 goto out; 3577 if (stp->st_stateowner->so_is_open_owner 3578 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 3579 goto out; 3580 status = nfs4_check_openmode(stp, flags); 3581 if (status) 3582 goto out; 3583 if (filpp) { 3584 if (flags & RD_STATE) 3585 *filpp = find_readable_file(stp->st_file); 3586 else 3587 *filpp = find_writeable_file(stp->st_file); 3588 } 3589 break; 3590 default: 3591 return nfserr_bad_stateid; 3592 } 3593 status = nfs_ok; 3594 out: 3595 return status; 3596 } 3597 3598 static __be32 3599 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) 3600 { 3601 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) 3602 return nfserr_locks_held; 3603 release_lock_stateid(stp); 3604 return nfs_ok; 3605 } 3606 3607 /* 3608 * Test if the stateid is valid 3609 */ 3610 __be32 3611 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3612 struct nfsd4_test_stateid *test_stateid) 3613 { 3614 struct nfsd4_test_stateid_id *stateid; 3615 struct nfs4_client *cl = cstate->session->se_client; 3616 3617 nfs4_lock_state(); 3618 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 3619 stateid->ts_id_status = 3620 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 3621 nfs4_unlock_state(); 3622 3623 return nfs_ok; 3624 } 3625 3626 __be32 3627 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3628 struct nfsd4_free_stateid *free_stateid) 3629 { 3630 stateid_t *stateid = &free_stateid->fr_stateid; 3631 struct nfs4_stid *s; 3632 struct nfs4_delegation *dp; 3633 struct nfs4_client *cl = cstate->session->se_client; 3634 __be32 ret = nfserr_bad_stateid; 3635 3636 nfs4_lock_state(); 3637 s = find_stateid(cl, stateid); 3638 if (!s) 3639 goto out; 3640 switch (s->sc_type) { 3641 case NFS4_DELEG_STID: 3642 ret = nfserr_locks_held; 3643 goto out; 3644 case NFS4_OPEN_STID: 3645 case NFS4_LOCK_STID: 3646 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 3647 if (ret) 3648 goto out; 3649 if (s->sc_type == NFS4_LOCK_STID) 3650 ret = nfsd4_free_lock_stateid(openlockstateid(s)); 3651 else 3652 ret = nfserr_locks_held; 3653 break; 3654 case NFS4_REVOKED_DELEG_STID: 3655 dp = delegstateid(s); 3656 destroy_revoked_delegation(dp); 3657 ret = nfs_ok; 3658 break; 3659 default: 3660 ret = nfserr_bad_stateid; 3661 } 3662 out: 3663 nfs4_unlock_state(); 3664 return ret; 3665 } 3666 3667 static inline int 3668 setlkflg (int type) 3669 { 3670 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 
3671 RD_STATE : WR_STATE; 3672 } 3673 3674 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 3675 { 3676 struct svc_fh *current_fh = &cstate->current_fh; 3677 struct nfs4_stateowner *sop = stp->st_stateowner; 3678 __be32 status; 3679 3680 status = nfsd4_check_seqid(cstate, sop, seqid); 3681 if (status) 3682 return status; 3683 if (stp->st_stid.sc_type == NFS4_CLOSED_STID 3684 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) 3685 /* 3686 * "Closed" stateid's exist *only* to return 3687 * nfserr_replay_me from the previous step, and 3688 * revoked delegations are kept only for free_stateid. 3689 */ 3690 return nfserr_bad_stateid; 3691 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 3692 if (status) 3693 return status; 3694 return nfs4_check_fh(current_fh, stp); 3695 } 3696 3697 /* 3698 * Checks for sequence id mutating operations. 3699 */ 3700 static __be32 3701 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 3702 stateid_t *stateid, char typemask, 3703 struct nfs4_ol_stateid **stpp, 3704 struct nfsd_net *nn) 3705 { 3706 __be32 status; 3707 struct nfs4_stid *s; 3708 3709 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, 3710 seqid, STATEID_VAL(stateid)); 3711 3712 *stpp = NULL; 3713 status = nfsd4_lookup_stateid(stateid, typemask, &s, 3714 cstate->minorversion, nn); 3715 if (status) 3716 return status; 3717 *stpp = openlockstateid(s); 3718 if (!nfsd4_has_session(cstate)) 3719 cstate->replay_owner = (*stpp)->st_stateowner; 3720 3721 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); 3722 } 3723 3724 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 3725 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 3726 { 3727 __be32 status; 3728 struct nfs4_openowner *oo; 3729 3730 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 3731 NFS4_OPEN_STID, stpp, nn); 3732 if (status) 3733 return status; 3734 oo = openowner((*stpp)->st_stateowner); 3735 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) 3736 return nfserr_bad_stateid; 3737 return nfs_ok; 3738 } 3739 3740 __be32 3741 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3742 struct nfsd4_open_confirm *oc) 3743 { 3744 __be32 status; 3745 struct nfs4_openowner *oo; 3746 struct nfs4_ol_stateid *stp; 3747 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3748 3749 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", 3750 (int)cstate->current_fh.fh_dentry->d_name.len, 3751 cstate->current_fh.fh_dentry->d_name.name); 3752 3753 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 3754 if (status) 3755 return status; 3756 3757 nfs4_lock_state(); 3758 3759 status = nfs4_preprocess_seqid_op(cstate, 3760 oc->oc_seqid, &oc->oc_req_stateid, 3761 NFS4_OPEN_STID, &stp, nn); 3762 if (status) 3763 goto out; 3764 oo = openowner(stp->st_stateowner); 3765 status = nfserr_bad_stateid; 3766 if (oo->oo_flags & NFS4_OO_CONFIRMED) 3767 goto out; 3768 oo->oo_flags |= NFS4_OO_CONFIRMED; 3769 update_stateid(&stp->st_stid.sc_stateid); 3770 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3771 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 3772 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 3773 3774 nfsd4_client_record_create(oo->oo_owner.so_client); 3775 status = nfs_ok; 3776 out: 3777 nfsd4_bump_seqid(cstate, status); 
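	/*
	 * Note: on a replay, cstate->replay_owner is set and we return
	 * with the state lock still held: the cached reply must be
	 * encoded under it, and the compound processing code drops the
	 * lock only after encoding (see the "nfs4_unlock_state() called
	 * after encode" comment before nfsd4_close below).
	 */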
3778 if (!cstate->replay_owner) 3779 nfs4_unlock_state(); 3780 return status; 3781 } 3782 3783 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 3784 { 3785 if (!test_access(access, stp)) 3786 return; 3787 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access)); 3788 clear_access(access, stp); 3789 } 3790 3791 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 3792 { 3793 switch (to_access) { 3794 case NFS4_SHARE_ACCESS_READ: 3795 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 3796 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 3797 break; 3798 case NFS4_SHARE_ACCESS_WRITE: 3799 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 3800 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 3801 break; 3802 case NFS4_SHARE_ACCESS_BOTH: 3803 break; 3804 default: 3805 WARN_ON_ONCE(1); 3806 } 3807 } 3808 3809 static void 3810 reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp) 3811 { 3812 int i; 3813 for (i = 0; i < 4; i++) { 3814 if ((i & deny) != i) 3815 clear_deny(i, stp); 3816 } 3817 } 3818 3819 __be32 3820 nfsd4_open_downgrade(struct svc_rqst *rqstp, 3821 struct nfsd4_compound_state *cstate, 3822 struct nfsd4_open_downgrade *od) 3823 { 3824 __be32 status; 3825 struct nfs4_ol_stateid *stp; 3826 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3827 3828 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 3829 (int)cstate->current_fh.fh_dentry->d_name.len, 3830 cstate->current_fh.fh_dentry->d_name.name); 3831 3832 /* We don't yet support WANT bits: */ 3833 if (od->od_deleg_want) 3834 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 3835 od->od_deleg_want); 3836 3837 nfs4_lock_state(); 3838 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 3839 &od->od_stateid, &stp, nn); 3840 if (status) 3841 goto out; 3842 status = nfserr_inval; 3843 if (!test_access(od->od_share_access, stp)) { 3844 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n", 3845 stp->st_access_bmap, od->od_share_access); 3846 goto out; 3847 } 3848 if (!test_deny(od->od_share_deny, stp)) { 3849 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", 3850 stp->st_deny_bmap, od->od_share_deny); 3851 goto out; 3852 } 3853 nfs4_stateid_downgrade(stp, od->od_share_access); 3854 3855 reset_union_bmap_deny(od->od_share_deny, stp); 3856 3857 update_stateid(&stp->st_stid.sc_stateid); 3858 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3859 status = nfs_ok; 3860 out: 3861 nfsd4_bump_seqid(cstate, status); 3862 if (!cstate->replay_owner) 3863 nfs4_unlock_state(); 3864 return status; 3865 } 3866 3867 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 3868 { 3869 unhash_open_stateid(s); 3870 s->st_stid.sc_type = NFS4_CLOSED_STID; 3871 } 3872 3873 /* 3874 * nfs4_unlock_state() called after encode 3875 */ 3876 __be32 3877 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3878 struct nfsd4_close *close) 3879 { 3880 __be32 status; 3881 struct nfs4_openowner *oo; 3882 struct nfs4_ol_stateid *stp; 3883 struct net *net = SVC_NET(rqstp); 3884 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3885 3886 dprintk("NFSD: nfsd4_close on file %.*s\n", 3887 (int)cstate->current_fh.fh_dentry->d_name.len, 3888 cstate->current_fh.fh_dentry->d_name.name); 3889 3890 nfs4_lock_state(); 3891 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 3892 &close->cl_stateid, 3893 
NFS4_OPEN_STID|NFS4_CLOSED_STID, 3894 &stp, nn); 3895 nfsd4_bump_seqid(cstate, status); 3896 if (status) 3897 goto out; 3898 oo = openowner(stp->st_stateowner); 3899 update_stateid(&stp->st_stid.sc_stateid); 3900 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3901 3902 nfsd4_close_open_stateid(stp); 3903 3904 if (cstate->minorversion) { 3905 unhash_stid(&stp->st_stid); 3906 free_generic_stateid(stp); 3907 } else 3908 oo->oo_last_closed_stid = stp; 3909 3910 if (list_empty(&oo->oo_owner.so_stateids)) { 3911 if (cstate->minorversion) 3912 release_openowner(oo); 3913 else { 3914 /* 3915 * In the 4.0 case we need to keep the owners around a 3916 * little while to handle CLOSE replay. 3917 */ 3918 move_to_close_lru(oo, SVC_NET(rqstp)); 3919 } 3920 } 3921 out: 3922 if (!cstate->replay_owner) 3923 nfs4_unlock_state(); 3924 return status; 3925 } 3926 3927 __be32 3928 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3929 struct nfsd4_delegreturn *dr) 3930 { 3931 struct nfs4_delegation *dp; 3932 stateid_t *stateid = &dr->dr_stateid; 3933 struct nfs4_stid *s; 3934 __be32 status; 3935 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3936 3937 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 3938 return status; 3939 3940 nfs4_lock_state(); 3941 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s, 3942 cstate->minorversion, nn); 3943 if (status) 3944 goto out; 3945 dp = delegstateid(s); 3946 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); 3947 if (status) 3948 goto out; 3949 3950 destroy_delegation(dp); 3951 out: 3952 nfs4_unlock_state(); 3953 3954 return status; 3955 } 3956 3957 3958 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) 3959 3960 #define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1) 3961 3962 static inline u64 3963 end_offset(u64 start, u64 len) 3964 { 3965 u64 end; 3966 3967 end = start + len; 3968 return end >= start ? end: NFS4_MAX_UINT64; 3969 } 3970 3971 /* last octet in a range */ 3972 static inline u64 3973 last_byte_offset(u64 start, u64 len) 3974 { 3975 u64 end; 3976 3977 WARN_ON_ONCE(!len); 3978 end = start + len; 3979 return end > start ? end - 1: NFS4_MAX_UINT64; 3980 } 3981 3982 static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername) 3983 { 3984 return (file_hashval(inode) + cl_id 3985 + opaque_hashval(ownername->data, ownername->len)) 3986 & LOCKOWNER_INO_HASH_MASK; 3987 } 3988 3989 /* 3990 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 3991 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 3992 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 3993 * locking, this prevents us from being completely protocol-compliant. The 3994 * real solution to this problem is to start using unsigned file offsets in 3995 * the VFS, but this is a very deep change! 3996 */ 3997 static inline void 3998 nfs4_transform_lock_offset(struct file_lock *lock) 3999 { 4000 if (lock->fl_start < 0) 4001 lock->fl_start = OFFSET_MAX; 4002 if (lock->fl_end < 0) 4003 lock->fl_end = OFFSET_MAX; 4004 } 4005 4006 /* Hack!: For now, we're defining this just so we can use a pointer to it 4007 * as a unique cookie to identify our (NFSv4's) posix locks. 
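 *
 * nfs4_set_lock_denied() below depends on this: a conflicting lock whose
 * fl_lmops points at nfsd_posix_mng_ops is known to have been set by nfsd,
 * so its fl_owner can safely be cast back to a struct nfs4_lockowner.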
*/ 4008 static const struct lock_manager_operations nfsd_posix_mng_ops = { 4009 }; 4010 4011 static inline void 4012 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 4013 { 4014 struct nfs4_lockowner *lo; 4015 4016 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 4017 lo = (struct nfs4_lockowner *) fl->fl_owner; 4018 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data, 4019 lo->lo_owner.so_owner.len, GFP_KERNEL); 4020 if (!deny->ld_owner.data) 4021 /* We just don't care that much */ 4022 goto nevermind; 4023 deny->ld_owner.len = lo->lo_owner.so_owner.len; 4024 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 4025 } else { 4026 nevermind: 4027 deny->ld_owner.len = 0; 4028 deny->ld_owner.data = NULL; 4029 deny->ld_clientid.cl_boot = 0; 4030 deny->ld_clientid.cl_id = 0; 4031 } 4032 deny->ld_start = fl->fl_start; 4033 deny->ld_length = NFS4_MAX_UINT64; 4034 if (fl->fl_end != NFS4_MAX_UINT64) 4035 deny->ld_length = fl->fl_end - fl->fl_start + 1; 4036 deny->ld_type = NFS4_READ_LT; 4037 if (fl->fl_type != F_RDLCK) 4038 deny->ld_type = NFS4_WRITE_LT; 4039 } 4040 4041 static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner) 4042 { 4043 struct nfs4_ol_stateid *lst; 4044 4045 if (!same_owner_str(&lo->lo_owner, owner, clid)) 4046 return false; 4047 lst = list_first_entry(&lo->lo_owner.so_stateids, 4048 struct nfs4_ol_stateid, st_perstateowner); 4049 return lst->st_file->fi_inode == inode; 4050 } 4051 4052 static struct nfs4_lockowner * 4053 find_lockowner_str(struct inode *inode, clientid_t *clid, 4054 struct xdr_netobj *owner, struct nfsd_net *nn) 4055 { 4056 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner); 4057 struct nfs4_lockowner *lo; 4058 4059 list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) { 4060 if (same_lockowner_ino(lo, inode, clid, owner)) 4061 return lo; 4062 } 4063 return NULL; 4064 } 4065 4066 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp) 4067 { 4068 struct inode *inode = open_stp->st_file->fi_inode; 4069 unsigned int inohash = lockowner_ino_hashval(inode, 4070 clp->cl_clientid.cl_id, &lo->lo_owner.so_owner); 4071 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 4072 4073 list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); 4074 list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]); 4075 list_add(&lo->lo_perstateid, &open_stp->st_lockowners); 4076 } 4077 4078 /* 4079 * Allocate a lock owner structure. 4080 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have 4081 * occurred.
4082 * 4083 * strhashval = ownerstr_hashval 4084 */ 4085 4086 static struct nfs4_lockowner * 4087 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { 4088 struct nfs4_lockowner *lo; 4089 4090 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 4091 if (!lo) 4092 return NULL; 4093 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 4094 lo->lo_owner.so_is_open_owner = 0; 4095 /* It is the openowner seqid that will be incremented in encode in the 4096 * case of new lockowners; so increment the lock seqid manually: */ 4097 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1; 4098 hash_lockowner(lo, strhashval, clp, open_stp); 4099 return lo; 4100 } 4101 4102 static struct nfs4_ol_stateid * 4103 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp) 4104 { 4105 struct nfs4_ol_stateid *stp; 4106 struct nfs4_client *clp = lo->lo_owner.so_client; 4107 4108 stp = nfs4_alloc_stateid(clp); 4109 if (stp == NULL) 4110 return NULL; 4111 stp->st_stid.sc_type = NFS4_LOCK_STID; 4112 list_add(&stp->st_perfile, &fp->fi_stateids); 4113 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 4114 stp->st_stateowner = &lo->lo_owner; 4115 get_nfs4_file(fp); 4116 stp->st_file = fp; 4117 stp->st_access_bmap = 0; 4118 stp->st_deny_bmap = open_stp->st_deny_bmap; 4119 stp->st_openstp = open_stp; 4120 return stp; 4121 } 4122 4123 static int 4124 check_lock_length(u64 offset, u64 length) 4125 { 4126 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 4127 LOFF_OVERFLOW(offset, length))); 4128 } 4129 4130 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 4131 { 4132 struct nfs4_file *fp = lock_stp->st_file; 4133 int oflag = nfs4_access_to_omode(access); 4134 4135 if (test_access(access, lock_stp)) 4136 return; 4137 nfs4_file_get_access(fp, oflag); 4138 set_access(access, lock_stp); 4139 } 4140 4141 static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new) 4142 { 4143 struct nfs4_file *fi = ost->st_file; 4144 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 4145 struct nfs4_client *cl = oo->oo_owner.so_client; 4146 struct nfs4_lockowner *lo; 4147 unsigned int strhashval; 4148 struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id); 4149 4150 lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, 4151 &lock->v.new.owner, nn); 4152 if (lo) { 4153 if (!cstate->minorversion) 4154 return nfserr_bad_seqid; 4155 /* XXX: a lockowner always has exactly one stateid: */ 4156 *lst = list_first_entry(&lo->lo_owner.so_stateids, 4157 struct nfs4_ol_stateid, st_perstateowner); 4158 return nfs_ok; 4159 } 4160 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id, 4161 &lock->v.new.owner); 4162 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 4163 if (lo == NULL) 4164 return nfserr_jukebox; 4165 *lst = alloc_init_lock_stateid(lo, fi, ost); 4166 if (*lst == NULL) { 4167 release_lockowner(lo); 4168 return nfserr_jukebox; 4169 } 4170 *new = true; 4171 return nfs_ok; 4172 } 4173 4174 /* 4175 * LOCK operation 4176 */ 4177 __be32 4178 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4179 struct nfsd4_lock *lock) 4180 { 4181 struct nfs4_openowner *open_sop = NULL; 4182 struct nfs4_lockowner *lock_sop = NULL; 4183 struct nfs4_ol_stateid *lock_stp; 4184 struct file *filp = NULL; 4185 struct file_lock *file_lock = 
NULL; 4186 struct file_lock *conflock = NULL; 4187 __be32 status = 0; 4188 bool new_state = false; 4189 int lkflg; 4190 int err; 4191 struct net *net = SVC_NET(rqstp); 4192 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4193 4194 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 4195 (long long) lock->lk_offset, 4196 (long long) lock->lk_length); 4197 4198 if (check_lock_length(lock->lk_offset, lock->lk_length)) 4199 return nfserr_inval; 4200 4201 if ((status = fh_verify(rqstp, &cstate->current_fh, 4202 S_IFREG, NFSD_MAY_LOCK))) { 4203 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 4204 return status; 4205 } 4206 4207 nfs4_lock_state(); 4208 4209 if (lock->lk_is_new) { 4210 struct nfs4_ol_stateid *open_stp = NULL; 4211 4212 if (nfsd4_has_session(cstate)) 4213 /* See rfc 5661 18.10.3: given clientid is ignored: */ 4214 memcpy(&lock->v.new.clientid, 4215 &cstate->session->se_client->cl_clientid, 4216 sizeof(clientid_t)); 4217 4218 status = nfserr_stale_clientid; 4219 if (STALE_CLIENTID(&lock->lk_new_clientid, nn)) 4220 goto out; 4221 4222 /* validate and update open stateid and open seqid */ 4223 status = nfs4_preprocess_confirmed_seqid_op(cstate, 4224 lock->lk_new_open_seqid, 4225 &lock->lk_new_open_stateid, 4226 &open_stp, nn); 4227 if (status) 4228 goto out; 4229 open_sop = openowner(open_stp->st_stateowner); 4230 status = nfserr_bad_stateid; 4231 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 4232 &lock->v.new.clientid)) 4233 goto out; 4234 status = lookup_or_create_lock_state(cstate, open_stp, lock, 4235 &lock_stp, &new_state); 4236 } else 4237 status = nfs4_preprocess_seqid_op(cstate, 4238 lock->lk_old_lock_seqid, 4239 &lock->lk_old_lock_stateid, 4240 NFS4_LOCK_STID, &lock_stp, nn); 4241 if (status) 4242 goto out; 4243 lock_sop = lockowner(lock_stp->st_stateowner); 4244 4245 lkflg = setlkflg(lock->lk_type); 4246 status = nfs4_check_openmode(lock_stp, lkflg); 4247 if (status) 4248 goto out; 4249 4250 status = nfserr_grace; 4251 if (locks_in_grace(net) && !lock->lk_reclaim) 4252 goto out; 4253 status = nfserr_no_grace; 4254 if (!locks_in_grace(net) && lock->lk_reclaim) 4255 goto out; 4256 4257 file_lock = locks_alloc_lock(); 4258 if (!file_lock) { 4259 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 4260 status = nfserr_jukebox; 4261 goto out; 4262 } 4263 4264 locks_init_lock(file_lock); 4265 switch (lock->lk_type) { 4266 case NFS4_READ_LT: 4267 case NFS4_READW_LT: 4268 filp = find_readable_file(lock_stp->st_file); 4269 if (filp) 4270 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 4271 file_lock->fl_type = F_RDLCK; 4272 break; 4273 case NFS4_WRITE_LT: 4274 case NFS4_WRITEW_LT: 4275 filp = find_writeable_file(lock_stp->st_file); 4276 if (filp) 4277 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 4278 file_lock->fl_type = F_WRLCK; 4279 break; 4280 default: 4281 status = nfserr_inval; 4282 goto out; 4283 } 4284 if (!filp) { 4285 status = nfserr_openmode; 4286 goto out; 4287 } 4288 file_lock->fl_owner = (fl_owner_t)lock_sop; 4289 file_lock->fl_pid = current->tgid; 4290 file_lock->fl_file = filp; 4291 file_lock->fl_flags = FL_POSIX; 4292 file_lock->fl_lmops = &nfsd_posix_mng_ops; 4293 file_lock->fl_start = lock->lk_offset; 4294 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 4295 nfs4_transform_lock_offset(file_lock); 4296 4297 conflock = locks_alloc_lock(); 4298 if (!conflock) { 4299 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 4300 status = nfserr_jukebox; 4301 goto out; 4302 } 4303 4304 err = 
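/*
 * Attempt to set the lock. On -EAGAIN, vfs_lock_file() copies the
 * conflicting lock into conflock, which is translated for the client
 * in the nfserr_denied case below:
 */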
vfs_lock_file(filp, F_SETLK, file_lock, conflock); 4305 switch (-err) { 4306 case 0: /* success! */ 4307 update_stateid(&lock_stp->st_stid.sc_stateid); 4308 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 4309 sizeof(stateid_t)); 4310 status = 0; 4311 break; 4312 case (EAGAIN): /* conflock holds conflicting lock */ 4313 status = nfserr_denied; 4314 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 4315 nfs4_set_lock_denied(conflock, &lock->lk_denied); 4316 break; 4317 case (EDEADLK): 4318 status = nfserr_deadlock; 4319 break; 4320 default: 4321 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 4322 status = nfserrno(err); 4323 break; 4324 } 4325 out: 4326 if (status && new_state) 4327 release_lockowner(lock_sop); 4328 nfsd4_bump_seqid(cstate, status); 4329 if (!cstate->replay_owner) 4330 nfs4_unlock_state(); 4331 if (file_lock) 4332 locks_free_lock(file_lock); 4333 if (conflock) 4334 locks_free_lock(conflock); 4335 return status; 4336 } 4337 4338 /* 4339 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 4340 * so we do a temporary open here just to get an open file to pass to 4341 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 4342 * inode operation.) 4343 */ 4344 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 4345 { 4346 struct file *file; 4347 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 4348 if (!err) { 4349 err = nfserrno(vfs_test_lock(file, lock)); 4350 nfsd_close(file); 4351 } 4352 return err; 4353 } 4354 4355 /* 4356 * LOCKT operation 4357 */ 4358 __be32 4359 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4360 struct nfsd4_lockt *lockt) 4361 { 4362 struct inode *inode; 4363 struct file_lock *file_lock = NULL; 4364 struct nfs4_lockowner *lo; 4365 __be32 status; 4366 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4367 4368 if (locks_in_grace(SVC_NET(rqstp))) 4369 return nfserr_grace; 4370 4371 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 4372 return nfserr_inval; 4373 4374 nfs4_lock_state(); 4375 4376 if (!nfsd4_has_session(cstate)) { 4377 status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL); 4378 if (status) 4379 goto out; 4380 } 4381 4382 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 4383 goto out; 4384 4385 inode = cstate->current_fh.fh_dentry->d_inode; 4386 file_lock = locks_alloc_lock(); 4387 if (!file_lock) { 4388 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 4389 status = nfserr_jukebox; 4390 goto out; 4391 } 4392 locks_init_lock(file_lock); 4393 switch (lockt->lt_type) { 4394 case NFS4_READ_LT: 4395 case NFS4_READW_LT: 4396 file_lock->fl_type = F_RDLCK; 4397 break; 4398 case NFS4_WRITE_LT: 4399 case NFS4_WRITEW_LT: 4400 file_lock->fl_type = F_WRLCK; 4401 break; 4402 default: 4403 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 4404 status = nfserr_inval; 4405 goto out; 4406 } 4407 4408 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn); 4409 if (lo) 4410 file_lock->fl_owner = (fl_owner_t)lo; 4411 file_lock->fl_pid = current->tgid; 4412 file_lock->fl_flags = FL_POSIX; 4413 4414 file_lock->fl_start = lockt->lt_offset; 4415 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 4416 4417 nfs4_transform_lock_offset(file_lock); 4418 4419 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 4420 if (status) 4421 goto out; 4422 4423 if (file_lock->fl_type != F_UNLCK) { 4424 status = 
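/*
 * vfs_test_lock() left fl_type as F_UNLCK if nothing conflicts;
 * otherwise file_lock now describes the conflicting lock:
 */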
nfserr_denied; 4425 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 4426 } 4427 out: 4428 nfs4_unlock_state(); 4429 if (file_lock) 4430 locks_free_lock(file_lock); 4431 return status; 4432 } 4433 4434 __be32 4435 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4436 struct nfsd4_locku *locku) 4437 { 4438 struct nfs4_lockowner *lo; 4439 struct nfs4_ol_stateid *stp; 4440 struct file *filp = NULL; 4441 struct file_lock *file_lock = NULL; 4442 __be32 status; 4443 int err; 4444 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4445 4446 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 4447 (long long) locku->lu_offset, 4448 (long long) locku->lu_length); 4449 4450 if (check_lock_length(locku->lu_offset, locku->lu_length)) 4451 return nfserr_inval; 4452 4453 nfs4_lock_state(); 4454 4455 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 4456 &locku->lu_stateid, NFS4_LOCK_STID, 4457 &stp, nn); 4458 if (status) 4459 goto out; 4460 filp = find_any_file(stp->st_file); 4461 if (!filp) { 4462 status = nfserr_lock_range; 4463 goto out; 4464 } 4465 file_lock = locks_alloc_lock(); 4466 if (!file_lock) { 4467 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 4468 status = nfserr_jukebox; 4469 goto out; 4470 } 4471 lo = lockowner(stp->st_stateowner); 4472 locks_init_lock(file_lock); 4473 file_lock->fl_type = F_UNLCK; 4474 file_lock->fl_owner = (fl_owner_t)lo; 4475 file_lock->fl_pid = current->tgid; 4476 file_lock->fl_file = filp; 4477 file_lock->fl_flags = FL_POSIX; 4478 file_lock->fl_lmops = &nfsd_posix_mng_ops; 4479 file_lock->fl_start = locku->lu_offset; 4480 4481 file_lock->fl_end = last_byte_offset(locku->lu_offset, 4482 locku->lu_length); 4483 nfs4_transform_lock_offset(file_lock); 4484 4485 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); 4486 if (err) { 4487 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 4488 goto out_nfserr; 4489 } 4490 update_stateid(&stp->st_stid.sc_stateid); 4491 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4492 4493 if (nfsd4_has_session(cstate) && !check_for_locks(stp->st_file, lo)) { 4494 WARN_ON_ONCE(cstate->replay_owner); 4495 release_lockowner(lo); 4496 } 4497 4498 out: 4499 nfsd4_bump_seqid(cstate, status); 4500 if (!cstate->replay_owner) 4501 nfs4_unlock_state(); 4502 if (file_lock) 4503 locks_free_lock(file_lock); 4504 return status; 4505 4506 out_nfserr: 4507 status = nfserrno(err); 4508 goto out; 4509 } 4510 4511 /* 4512 * returns 4513 * 1: locks held by lockowner 4514 * 0: no locks held by lockowner 4515 */ 4516 static int 4517 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) 4518 { 4519 struct file_lock **flpp; 4520 struct inode *inode = filp->fi_inode; 4521 int status = 0; 4522 4523 spin_lock(&inode->i_lock); 4524 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 4525 if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 4526 status = 1; 4527 goto out; 4528 } 4529 } 4530 out: 4531 spin_unlock(&inode->i_lock); 4532 return status; 4533 } 4534 4535 __be32 4536 nfsd4_release_lockowner(struct svc_rqst *rqstp, 4537 struct nfsd4_compound_state *cstate, 4538 struct nfsd4_release_lockowner *rlockowner) 4539 { 4540 clientid_t *clid = &rlockowner->rl_clientid; 4541 struct nfs4_stateowner *sop; 4542 struct nfs4_lockowner *lo; 4543 struct nfs4_ol_stateid *stp; 4544 struct xdr_netobj *owner = &rlockowner->rl_owner; 4545 struct list_head matches; 4546 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner); 4547 __be32 status; 4548 struct 
nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4549 4550 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 4551 clid->cl_boot, clid->cl_id); 4552 4553 nfs4_lock_state(); 4554 4555 status = lookup_clientid(clid, cstate->minorversion, nn, NULL); 4556 if (status) 4557 goto out; 4558 4559 status = nfserr_locks_held; 4560 INIT_LIST_HEAD(&matches); 4561 4562 list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) { 4563 if (sop->so_is_open_owner) 4564 continue; 4565 if (!same_owner_str(sop, owner, clid)) 4566 continue; 4567 list_for_each_entry(stp, &sop->so_stateids, 4568 st_perstateowner) { 4569 lo = lockowner(sop); 4570 if (check_for_locks(stp->st_file, lo)) 4571 goto out; 4572 list_add(&lo->lo_list, &matches); 4573 } 4574 } 4575 /* Clients probably won't expect us to return with some (but not all) 4576 * of the lockowner state released; so don't release any until all 4577 * have been checked. */ 4578 status = nfs_ok; 4579 while (!list_empty(&matches)) { 4580 lo = list_entry(matches.next, struct nfs4_lockowner, 4581 lo_list); 4582 /* unhash_stateowner deletes so_perclient only 4583 * for openowners. */ 4584 list_del(&lo->lo_list); 4585 release_lockowner(lo); 4586 } 4587 out: 4588 nfs4_unlock_state(); 4589 return status; 4590 } 4591 4592 static inline struct nfs4_client_reclaim * 4593 alloc_reclaim(void) 4594 { 4595 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 4596 } 4597 4598 bool 4599 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn) 4600 { 4601 struct nfs4_client_reclaim *crp; 4602 4603 crp = nfsd4_find_reclaim_client(name, nn); 4604 return (crp && crp->cr_clp); 4605 } 4606 4607 /* 4608 * failure => all reset bets are off, nfserr_no_grace... 4609 */ 4610 struct nfs4_client_reclaim * 4611 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn) 4612 { 4613 unsigned int strhashval; 4614 struct nfs4_client_reclaim *crp; 4615 4616 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); 4617 crp = alloc_reclaim(); 4618 if (crp) { 4619 strhashval = clientstr_hashval(name); 4620 INIT_LIST_HEAD(&crp->cr_strhash); 4621 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 4622 memcpy(crp->cr_recdir, name, HEXDIR_LEN); 4623 crp->cr_clp = NULL; 4624 nn->reclaim_str_hashtbl_size++; 4625 } 4626 return crp; 4627 } 4628 4629 void 4630 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 4631 { 4632 list_del(&crp->cr_strhash); 4633 kfree(crp); 4634 nn->reclaim_str_hashtbl_size--; 4635 } 4636 4637 void 4638 nfs4_release_reclaim(struct nfsd_net *nn) 4639 { 4640 struct nfs4_client_reclaim *crp = NULL; 4641 int i; 4642 4643 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4644 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 4645 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 4646 struct nfs4_client_reclaim, cr_strhash); 4647 nfs4_remove_reclaim_record(crp, nn); 4648 } 4649 } 4650 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 4651 } 4652 4653 /* 4654 * called from OPEN, CLAIM_PREVIOUS with a new clientid. 
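 * The reclaim records searched here were populated via
 * nfs4_client_to_reclaim() when the client-tracking code loaded the
 * recovery data from the previous boot.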
*/ 4655 struct nfs4_client_reclaim * 4656 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn) 4657 { 4658 unsigned int strhashval; 4659 struct nfs4_client_reclaim *crp = NULL; 4660 4661 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir); 4662 4663 strhashval = clientstr_hashval(recdir); 4664 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 4665 if (same_name(crp->cr_recdir, recdir)) { 4666 return crp; 4667 } 4668 } 4669 return NULL; 4670 } 4671 4672 /* 4673 * Called from OPEN. Look for clientid in reclaim list. 4674 */ 4675 __be32 4676 nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn) 4677 { 4678 struct nfs4_client *clp; 4679 4680 /* find clientid in conf_id_hashtbl */ 4681 clp = find_confirmed_client(clid, sessions, nn); 4682 if (clp == NULL) 4683 return nfserr_reclaim_bad; 4684 4685 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok; 4686 } 4687 4688 #ifdef CONFIG_NFSD_FAULT_INJECTION 4689 4690 u64 nfsd_forget_client(struct nfs4_client *clp, u64 max) 4691 { 4692 if (mark_client_expired(clp)) 4693 return 0; 4694 expire_client(clp); 4695 return 1; 4696 } 4697 4698 u64 nfsd_print_client(struct nfs4_client *clp, u64 num) 4699 { 4700 char buf[INET6_ADDRSTRLEN]; 4701 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 4702 printk(KERN_INFO "NFS Client: %s\n", buf); 4703 return 1; 4704 } 4705 4706 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, 4707 const char *type) 4708 { 4709 char buf[INET6_ADDRSTRLEN]; 4710 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 4711 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); 4712 } 4713 4714 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *)) 4715 { 4716 struct nfs4_openowner *oop; 4717 struct nfs4_lockowner *lop, *lo_next; 4718 struct nfs4_ol_stateid *stp, *st_next; 4719 u64 count = 0; 4720 4721 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { 4722 list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) { 4723 list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) { 4724 if (func) 4725 func(lop); 4726 if (++count == max) 4727 return count; 4728 } 4729 } 4730 } 4731 4732 return count; 4733 } 4734 4735 u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max) 4736 { 4737 return nfsd_foreach_client_lock(clp, max, release_lockowner); 4738 } 4739 4740 u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max) 4741 { 4742 u64 count = nfsd_foreach_client_lock(clp, max, NULL); 4743 nfsd_print_count(clp, count, "locked files"); 4744 return count; 4745 } 4746 4747 static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *)) 4748 { 4749 struct nfs4_openowner *oop, *next; 4750 u64 count = 0; 4751 4752 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { 4753 if (func) 4754 func(oop); 4755 if (++count == max) 4756 break; 4757 } 4758 4759 return count; 4760 } 4761 4762 u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max) 4763 { 4764 return nfsd_foreach_client_open(clp, max, release_openowner); 4765 } 4766 4767 u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max) 4768 { 4769 u64 count = nfsd_foreach_client_open(clp, max, NULL); 4770 nfsd_print_count(clp, count, "open files"); 4771 return count; 4772 } 4773 4774 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, 4775 
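/* a NULL victims list means "just count, don't move": */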
struct list_head *victims) 4776 { 4777 struct nfs4_delegation *dp, *next; 4778 u64 count = 0; 4779 4780 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { 4781 if (victims) 4782 list_move(&dp->dl_recall_lru, victims); 4783 if (++count == max) 4784 break; 4785 } 4786 return count; 4787 } 4788 4789 u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max) 4790 { 4791 struct nfs4_delegation *dp, *next; 4792 LIST_HEAD(victims); 4793 u64 count; 4794 4795 spin_lock(&recall_lock); 4796 count = nfsd_find_all_delegations(clp, max, &victims); 4797 spin_unlock(&recall_lock); 4798 4799 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) 4800 revoke_delegation(dp); 4801 4802 return count; 4803 } 4804 4805 u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max) 4806 { 4807 struct nfs4_delegation *dp, *next; 4808 LIST_HEAD(victims); 4809 u64 count; 4810 4811 spin_lock(&recall_lock); 4812 count = nfsd_find_all_delegations(clp, max, &victims); 4813 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) 4814 nfsd_break_one_deleg(dp); 4815 spin_unlock(&recall_lock); 4816 4817 return count; 4818 } 4819 4820 u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max) 4821 { 4822 u64 count = 0; 4823 4824 spin_lock(&recall_lock); 4825 count = nfsd_find_all_delegations(clp, max, NULL); 4826 spin_unlock(&recall_lock); 4827 4828 nfsd_print_count(clp, count, "delegations"); 4829 return count; 4830 } 4831 4832 u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64)) 4833 { 4834 struct nfs4_client *clp, *next; 4835 u64 count = 0; 4836 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); 4837 4838 if (!nfsd_netns_ready(nn)) 4839 return 0; 4840 4841 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { 4842 count += func(clp, max - count); 4843 if ((max != 0) && (count >= max)) 4844 break; 4845 } 4846 4847 return count; 4848 } 4849 4850 struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) 4851 { 4852 struct nfs4_client *clp; 4853 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); 4854 4855 if (!nfsd_netns_ready(nn)) 4856 return NULL; 4857 4858 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 4859 if (memcmp(&clp->cl_addr, addr, addr_size) == 0) 4860 return clp; 4861 } 4862 return NULL; 4863 } 4864 4865 #endif /* CONFIG_NFSD_FAULT_INJECTION */ 4866 4867 /* initialization to perform at module load time: */ 4868 4869 void 4870 nfs4_state_init(void) 4871 { 4872 } 4873 4874 /* 4875 * Since the lifetime of a delegation isn't limited to that of an open, a 4876 * client may quite reasonably hang on to a delegation as long as it has 4877 * the inode cached. This becomes an obvious problem the first time a 4878 * client's inode cache approaches the size of the server's total memory. 4879 * 4880 * For now we avoid this problem by imposing a hard limit on the number 4881 * of delegations, which varies according to the server's memory size. 4882 */ 4883 static void 4884 set_max_delegations(void) 4885 { 4886 /* 4887 * Allow at most 4 delegations per megabyte of RAM. Quick 4888 * estimates suggest that in the worst case (where every delegation 4889 * is for a different inode), a delegation could take about 1.5K, 4890 * giving a worst case usage of about 6% of memory. 
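 *
 * Worked out: nr_free_buffer_pages() >> (20 - PAGE_SHIFT) is the free
 * buffer memory in megabytes (a page is 2^PAGE_SHIFT bytes and a
 * megabyte is 2^20), and granting 4 delegations per megabyte is a
 * further << 2, so both steps combine into the single
 * >> (20 - 2 - PAGE_SHIFT) below.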
4891 */ 4892 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 4893 } 4894 4895 static int nfs4_state_create_net(struct net *net) 4896 { 4897 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4898 int i; 4899 4900 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) * 4901 CLIENT_HASH_SIZE, GFP_KERNEL); 4902 if (!nn->conf_id_hashtbl) 4903 goto err; 4904 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) * 4905 CLIENT_HASH_SIZE, GFP_KERNEL); 4906 if (!nn->unconf_id_hashtbl) 4907 goto err_unconf_id; 4908 nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) * 4909 OWNER_HASH_SIZE, GFP_KERNEL); 4910 if (!nn->ownerstr_hashtbl) 4911 goto err_ownerstr; 4912 nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) * 4913 LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL); 4914 if (!nn->lockowner_ino_hashtbl) 4915 goto err_lockowner_ino; 4916 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * 4917 SESSION_HASH_SIZE, GFP_KERNEL); 4918 if (!nn->sessionid_hashtbl) 4919 goto err_sessionid; 4920 4921 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4922 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 4923 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 4924 } 4925 for (i = 0; i < OWNER_HASH_SIZE; i++) 4926 INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]); 4927 for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++) 4928 INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]); 4929 for (i = 0; i < SESSION_HASH_SIZE; i++) 4930 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 4931 nn->conf_name_tree = RB_ROOT; 4932 nn->unconf_name_tree = RB_ROOT; 4933 INIT_LIST_HEAD(&nn->client_lru); 4934 INIT_LIST_HEAD(&nn->close_lru); 4935 INIT_LIST_HEAD(&nn->del_recall_lru); 4936 spin_lock_init(&nn->client_lock); 4937 4938 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 4939 get_net(net); 4940 4941 return 0; 4942 4943 err_sessionid: 4944 kfree(nn->lockowner_ino_hashtbl); 4945 err_lockowner_ino: 4946 kfree(nn->ownerstr_hashtbl); 4947 err_ownerstr: 4948 kfree(nn->unconf_id_hashtbl); 4949 err_unconf_id: 4950 kfree(nn->conf_id_hashtbl); 4951 err: 4952 return -ENOMEM; 4953 } 4954 4955 static void 4956 nfs4_state_destroy_net(struct net *net) 4957 { 4958 int i; 4959 struct nfs4_client *clp = NULL; 4960 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4961 struct rb_node *node, *tmp; 4962 4963 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4964 while (!list_empty(&nn->conf_id_hashtbl[i])) { 4965 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 4966 destroy_client(clp); 4967 } 4968 } 4969 4970 node = rb_first(&nn->unconf_name_tree); 4971 while (node != NULL) { 4972 tmp = node; 4973 node = rb_next(tmp); 4974 clp = rb_entry(tmp, struct nfs4_client, cl_namenode); 4975 rb_erase(tmp, &nn->unconf_name_tree); 4976 destroy_client(clp); 4977 } 4978 4979 kfree(nn->sessionid_hashtbl); 4980 kfree(nn->lockowner_ino_hashtbl); 4981 kfree(nn->ownerstr_hashtbl); 4982 kfree(nn->unconf_id_hashtbl); 4983 kfree(nn->conf_id_hashtbl); 4984 put_net(net); 4985 } 4986 4987 int 4988 nfs4_state_start_net(struct net *net) 4989 { 4990 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4991 int ret; 4992 4993 ret = nfs4_state_create_net(net); 4994 if (ret) 4995 return ret; 4996 nfsd4_client_tracking_init(net); 4997 nn->boot_time = get_seconds(); 4998 locks_start_grace(net, &nn->nfsd4_manager); 4999 nn->grace_ended = false; 5000 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", 5001 nn->nfsd4_grace, net); 5002 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 5003 return 0; 5004 } 5005 5006 /* 
initialization to perform when the nfsd service is started: */ 5007 5008 int 5009 nfs4_state_start(void) 5010 { 5011 int ret; 5012 5013 ret = set_callback_cred(); 5014 if (ret) 5015 return -ENOMEM; 5016 laundry_wq = create_singlethread_workqueue("nfsd4"); 5017 if (laundry_wq == NULL) { 5018 ret = -ENOMEM; 5019 goto out_recovery; 5020 } 5021 ret = nfsd4_create_callback_queue(); 5022 if (ret) 5023 goto out_free_laundry; 5024 5025 set_max_delegations(); 5026 5027 return 0; 5028 5029 out_free_laundry: 5030 destroy_workqueue(laundry_wq); 5031 out_recovery: 5032 return ret; 5033 } 5034 5035 /* should be called with the state lock held */ 5036 void 5037 nfs4_state_shutdown_net(struct net *net) 5038 { 5039 struct nfs4_delegation *dp = NULL; 5040 struct list_head *pos, *next, reaplist; 5041 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5042 5043 cancel_delayed_work_sync(&nn->laundromat_work); 5044 locks_end_grace(&nn->nfsd4_manager); 5045 5046 INIT_LIST_HEAD(&reaplist); 5047 spin_lock(&recall_lock); 5048 list_for_each_safe(pos, next, &nn->del_recall_lru) { 5049 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5050 list_move(&dp->dl_recall_lru, &reaplist); 5051 } 5052 spin_unlock(&recall_lock); 5053 list_for_each_safe(pos, next, &reaplist) { 5054 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5055 destroy_delegation(dp); 5056 } 5057 5058 nfsd4_client_tracking_exit(net); 5059 nfs4_state_destroy_net(net); 5060 } 5061 5062 void 5063 nfs4_state_shutdown(void) 5064 { 5065 destroy_workqueue(laundry_wq); 5066 nfsd4_destroy_callback_queue(); 5067 } 5068 5069 static void 5070 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 5071 { 5072 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) 5073 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 5074 } 5075 5076 static void 5077 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 5078 { 5079 if (cstate->minorversion) { 5080 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 5081 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 5082 } 5083 } 5084 5085 void 5086 clear_current_stateid(struct nfsd4_compound_state *cstate) 5087 { 5088 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 5089 } 5090 5091 /* 5092 * functions to set current state id 5093 */ 5094 void 5095 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 5096 { 5097 put_stateid(cstate, &odp->od_stateid); 5098 } 5099 5100 void 5101 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) 5102 { 5103 put_stateid(cstate, &open->op_stateid); 5104 } 5105 5106 void 5107 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 5108 { 5109 put_stateid(cstate, &close->cl_stateid); 5110 } 5111 5112 void 5113 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) 5114 { 5115 put_stateid(cstate, &lock->lk_resp_stateid); 5116 } 5117 5118 /* 5119 * functions to consume current state id 5120 */ 5121 5122 void 5123 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 5124 { 5125 get_stateid(cstate, &odp->od_stateid); 5126 } 5127 5128 void 5129 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp) 5130 { 5131 get_stateid(cstate, &drp->dr_stateid); 5132 } 5133 5134 void 5135 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp) 5136 { 5137 
get_stateid(cstate, &fsp->fr_stateid); 5138 } 5139 5140 void 5141 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) 5142 { 5143 get_stateid(cstate, &setattr->sa_stateid); 5144 } 5145 5146 void 5147 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 5148 { 5149 get_stateid(cstate, &close->cl_stateid); 5150 } 5151 5152 void 5153 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku) 5154 { 5155 get_stateid(cstate, &locku->lu_stateid); 5156 } 5157 5158 void 5159 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read) 5160 { 5161 get_stateid(cstate, &read->rd_stateid); 5162 } 5163 5164 void 5165 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write) 5166 { 5167 get_stateid(cstate, &write->wr_stateid); 5168 } 5169
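
/*
 * For reference: the current-stateid setters and getters above are not
 * called directly by the operations; nfs4proc.c points at them from the
 * operation table. A rough sketch of the CLOSE entry (see nfsd4_ops[] in
 * fs/nfsd/nfs4proc.c for the authoritative version):
 *
 *	[OP_CLOSE] = {
 *		.op_func = (nfsd4op_func)nfsd4_close,
 *		.op_name = "OP_CLOSE",
 *		.op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
 *		.op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
 *	},
 */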