/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/clnt.h>
#include "xdr4.h"
#include "vfs.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

/* Globals */
time_t nfsd4_lease = 90;	/* default lease time */
time_t nfsd4_grace = 90;
static time_t boot_time;
static u32 current_ownerid = 1;
static u32 current_fileid = 1;
static u32 current_delegid = 1;
static stateid_t zerostateid;	/* bits all 0 */
static stateid_t onestateid;	/* bits all 1 */
static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &onestateid, sizeof(stateid_t)))

/* forward declarations */
static struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static void nfs4_set_recdir(char *recdir);

/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);
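/*
 * Illustrative note (added, hedged; not in the original source): code
 * that touches nfsv4 state brackets itself with the nfs4_lock_state()
 * and nfs4_unlock_state() helpers defined below, roughly:
 *
 *	nfs4_lock_state();
 *	... inspect or modify client/open/lock/delegation state ...
 *	nfs4_unlock_state();
 */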
/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

static struct kmem_cache *stateowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static struct list_head del_recall_lru;

static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
		list_del(&fi->fi_hash);
		spin_unlock(&recall_lock);
		iput(fi->fi_inode);
		kmem_cache_free(file_slab, fi);
	}
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for nfs4_stateowner */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

#define ownerid_hashval(id) \
	((id) & OWNER_HASH_MASK)
#define ownerstr_hashval(clientid, ownername) \
	(((clientid) + opaque_hashval((ownername.data), (ownername.len))) & OWNER_HASH_MASK)

static struct list_head	ownerid_hashtbl[OWNER_HASH_SIZE];
static struct list_head	ownerstr_hashtbl[OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

/* hash table for (open)nfs4_stateid */
#define STATEID_HASH_BITS	10
#define STATEID_HASH_SIZE	(1 << STATEID_HASH_BITS)
#define STATEID_HASH_MASK	(STATEID_HASH_SIZE - 1)

#define file_hashval(x) \
	hash_ptr(x, FILE_HASH_BITS)
#define stateid_hashval(owner_id, file_id) \
	(((owner_id) + (file_id)) & STATEID_HASH_MASK)

static struct list_head file_hashtbl[FILE_HASH_SIZE];
static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];

static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
	atomic_inc(&fp->fi_access[oflag]);
}

static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_get_access(fp, O_RDONLY);
		__nfs4_file_get_access(fp, O_WRONLY);
	} else
		__nfs4_file_get_access(fp, oflag);
}

static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
{
	if (fp->fi_fds[oflag]) {
		fput(fp->fi_fds[oflag]);
		fp->fi_fds[oflag] = NULL;
	}
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (atomic_dec_and_test(&fp->fi_access[oflag])) {
		nfs4_file_put_fd(fp, O_RDWR);
		nfs4_file_put_fd(fp, oflag);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_put_access(fp, O_RDONLY);
		__nfs4_file_put_access(fp, O_WRONLY);
	} else
		__nfs4_file_put_access(fp, oflag);
}
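/*
 * Illustrative note (added, hedged): fi_access[] keeps one counter for
 * O_RDONLY and one for O_WRONLY; an O_RDWR opener is counted on both.
 * A sketch of the expected pairing, assuming a read/write open followed
 * by a matching close:
 *
 *	nfs4_file_get_access(fp, O_RDWR);	// bumps both counters
 *	...
 *	nfs4_file_put_access(fp, O_RDWR);	// drops both; the final
 *						// drop of a counter also
 *						// fput()s the cached
 *						// struct file(s)
 */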
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_fh *current_fh, u32 type)
{
	struct nfs4_delegation *dp;
	struct nfs4_file *fp = stp->st_file;

	dprintk("NFSD alloc_init_deleg\n");
	/*
	 * Major work on the lease subsystem (for example, to support
	 * callbacks on stat) will be required before we can support
	 * write delegations properly.
	 */
	if (type != NFS4_OPEN_DELEGATE_READ)
		return NULL;
	if (fp->fi_had_conflict)
		return NULL;
	if (num_delegations > max_delegations)
		return NULL;
	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
	if (dp == NULL)
		return dp;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_client = clp;
	get_nfs4_file(fp);
	dp->dl_file = fp;
	dp->dl_type = type;
	dp->dl_stateid.si_boot = boot_time;
	dp->dl_stateid.si_stateownerid = current_delegid++;
	dp->dl_stateid.si_fileid = 0;
	dp->dl_stateid.si_generation = 0;
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
	return dp;
}

void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		dprintk("NFSD: freeing dp %p\n",dp);
		put_nfs4_file(dp->dl_file);
		kmem_cache_free(deleg_slab, dp);
		num_delegations--;
	}
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;
	}
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	nfs4_put_delegation(dp);
}

/*
 * SETCLIENTID state
 */

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS	4
#define CLIENT_HASH_SIZE	(1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK	(CLIENT_HASH_SIZE - 1)

#define clientid_hashval(id) \
	((id) & CLIENT_HASH_MASK)
#define clientstr_hashval(name) \
	(opaque_hashval((name), 8) & CLIENT_HASH_MASK)
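/*
 * Illustrative note (added, hedged): opaque_hashval() above is a simple
 * multiply-by-37 string hash, so for a two-byte name {a, b} it returns
 * (a * 37) + b before masking.  clientstr_hashval() applies it to only
 * the first 8 bytes of the hex-encoded recovery-directory name and then
 * masks the result into one of CLIENT_HASH_SIZE buckets.
 */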
/*
 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
 * used in reboot/reset lease grace period processing
 *
 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
 * setclientid_confirmed info.
 *
 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
 * setclientid info.
 *
 * client_lru holds client queue ordered by nfs4_client.cl_time
 * for lease renewal.
 *
 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 * for last close replay.
 */
static struct list_head	reclaim_str_hashtbl[CLIENT_HASH_SIZE];
static int reclaim_str_hashtbl_size = 0;
static struct list_head	conf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head	conf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head	unconf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head	unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static void
set_access(unsigned int *access, unsigned long bmap) {
	int i;

	*access = 0;
	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			*access |= i;
	}
}

static void
set_deny(unsigned int *deny, unsigned long bmap) {
	int i;

	*deny = 0;
	for (i = 0; i < 4; i++) {
		if (test_bit(i, &bmap))
			*deny |= i;
	}
}

static int
test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
	unsigned int access, deny;

	set_access(&access, stp->st_access_bmap);
	set_deny(&deny, stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))
		return 0;
	return 1;
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	BUG();
}
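/*
 * Illustrative note (added, hedged): the share-access to open-mode
 * mapping above is exhaustive for valid access bitmaps:
 *
 *	NFS4_SHARE_ACCESS_READ			-> O_RDONLY
 *	NFS4_SHARE_ACCESS_WRITE			-> O_WRONLY
 *	READ | WRITE (== NFS4_SHARE_ACCESS_BOTH)	-> O_RDWR
 *
 * Anything else should have been rejected before we got here, hence
 * the BUG().
 */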
static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp)
{
	unsigned int access;

	set_access(&access, stp->st_access_bmap);
	return nfs4_access_to_omode(access);
}

static void unhash_generic_stateid(struct nfs4_stateid *stp)
{
	list_del(&stp->st_hash);
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);
}

static void free_generic_stateid(struct nfs4_stateid *stp)
{
	int oflag;

	if (stp->st_access_bmap) {
		oflag = nfs4_access_bmap_to_omode(stp);
		nfs4_file_put_access(stp->st_file, oflag);
	}
	put_nfs4_file(stp->st_file);
	kmem_cache_free(stateid_slab, stp);
}

static void release_lock_stateid(struct nfs4_stateid *stp)
{
	struct file *file;

	unhash_generic_stateid(stp);
	file = find_any_file(stp->st_file);
	if (file)
		locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
	free_generic_stateid(stp);
}

static void unhash_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_stateid *stp;

	list_del(&sop->so_idhash);
	list_del(&sop->so_strhash);
	list_del(&sop->so_perstateid);
	while (!list_empty(&sop->so_stateids)) {
		stp = list_first_entry(&sop->so_stateids,
				struct nfs4_stateid, st_perstateowner);
		release_lock_stateid(stp);
	}
}

static void release_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner(sop);
	nfs4_put_stateowner(sop);
}

static void
release_stateid_lockowners(struct nfs4_stateid *open_stp)
{
	struct nfs4_stateowner *lock_sop;

	while (!list_empty(&open_stp->st_lockowners)) {
		lock_sop = list_entry(open_stp->st_lockowners.next,
				struct nfs4_stateowner, so_perstateid);
		/* list_del(&open_stp->st_lockowners); */
		BUG_ON(lock_sop->so_is_open_owner);
		release_lockowner(lock_sop);
	}
}

static void release_open_stateid(struct nfs4_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_stateid_lockowners(stp);
	free_generic_stateid(stp);
}

static void unhash_openowner(struct nfs4_stateowner *sop)
{
	struct nfs4_stateid *stp;

	list_del(&sop->so_idhash);
	list_del(&sop->so_strhash);
	list_del(&sop->so_perclient);
	list_del(&sop->so_perstateid); /* XXX: necessary? */
	while (!list_empty(&sop->so_stateids)) {
		stp = list_first_entry(&sop->so_stateids,
				struct nfs4_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}

static void release_openowner(struct nfs4_stateowner *sop)
{
	unhash_openowner(sop);
	list_del(&sop->so_close_lru);
	nfs4_put_stateowner(sop);
}

#define SESSION_HASH_SIZE	512
static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
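/*
 * Illustrative note (added, hedged): with the nfsd4_sessionid overlay
 * used above, the 16-byte NFSv4.1 sessionid is laid out roughly as:
 *
 *	bytes  0- 7: clientid (boot time + per-boot counter)
 *	bytes  8-11: sequence (current_sessionid++)
 *	bytes 12-15: reserved (zero)
 *
 * hash_sessionid() therefore buckets sessions by the sequence field
 * alone.
 */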
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ	(24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
{
	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
}

static int nfsd4_sanitize_slot_size(u32 size)
{
	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

	return size;
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
	int avail;

	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);

	spin_lock(&nfsd_drc_lock);
	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
			nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(int slotsize, int num)
{
	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * num;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(int slotsize, int numslots)
{
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		mem = sizeof(struct nfsd4_slot) + slotsize;
		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
{
	u32 maxrpc = nfsd_serv->sv_max_mesg;

	new->maxreqs = numslots;
	new->maxresp_cached = min_t(u32, req->maxresp_cached,
					slotsize + NFSD_MIN_HDR_SEQ_SZ);
	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
}
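/*
 * Worked example (added, hedged): NFSD_MIN_HDR_SEQ_SZ is 24 + 12 + 44 =
 * 80 bytes.  If a client asks for ca_maxresponsesize_cached = 2128,
 * nfsd4_sanitize_slot_size() yields min(2128 - 80, NFSD_SLOT_CACHE_SIZE)
 * bytes per slot, and init_forechannel_attrs() advertises back at most
 * slotsize + 80, so we never promise to cache more than we allocated.
 */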
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	spin_unlock(&clp->cl_lock);
	nfsd4_probe_callback(clp);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
{
	struct nfsd4_conn *conn;
	int ret;

	conn = alloc_conn(rqstp, dir);
	if (!conn)
		return nfserr_jukebox;
	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	return nfs_ok;
}

static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (ses->se_flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;

	return nfsd4_new_conn(rqstp, ses, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

void free_session(struct kref *kref)
{
	struct nfsd4_session *ses;
	int mem;

	ses = container_of(kref, struct nfsd4_session, se_ref);
	nfsd4_del_conns(ses);
	spin_lock(&nfsd_drc_lock);
	mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
	nfsd_drc_mem_used -= mem;
	spin_unlock(&nfsd_drc_lock);
	free_session_slots(ses);
	kfree(ses);
}
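/*
 * Illustrative note (added, hedged): free_session() is the kref release
 * function for se_ref; callers normally drop their reference with
 * something like
 *
 *	kref_put(&ses->se_ref, free_session);
 *
 * (presumably wrapped by nfsd4_put_session()), so the connection list
 * and slot cache above are torn down only on the final put.
 */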
static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	struct nfsd4_session *new;
	struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
	int numslots, slotsize;
	int status;
	int idx;

	/*
	 * Note decreasing slot size below client's request may
	 * make it difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.
	 */
	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
	if (numslots < 1)
		return NULL;

	new = alloc_session(slotsize, numslots);
	if (!new) {
		nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
		return NULL;
	}
	init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	kref_init(&new->se_ref);
	idx = hash_sessionid(&new->se_sessionid);
	spin_lock(&client_lock);
	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&client_lock);

	status = nfsd4_new_conn_from_crses(rqstp, new);
	/* whoops: benny points out, status is ignored! (err, or bogus) */
	if (status) {
		free_session(&new->se_ref);
		return NULL;
	}
	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
	nfsd4_probe_callback(clp);
	return new;
}

/* caller must hold client_lock */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_session *elem;
	int idx;

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp)) {
		dprintk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	/*
	 * Move client to the end of the LRU list.
	 */
	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &client_lru);
	clp->cl_time = get_seconds();
}

static inline void
renew_client(struct nfs4_client *clp)
{
	spin_lock(&client_lock);
	renew_client_locked(clp);
	spin_unlock(&client_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid)
{
	if (clid->cl_boot == boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, boot_time);
	return 1;
}
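/*
 * Illustrative note (added, hedged): cl_boot is stamped with boot_time
 * by gen_clid(), so after a server reboot every clientid handed out
 * before the reboot compares unequal here and the client is told to
 * re-establish its state.
 */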
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmalloc(name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL) {
		kfree(clp);
		return NULL;
	}
	memcpy(clp->cl_name.data, name.data, name.len);
	clp->cl_name.len = name.len;
	return clp;
}

static inline void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		nfsd4_put_session(ses);
	}
	if (clp->cl_cred.cr_group_info)
		put_group_info(clp->cl_cred.cr_group_info);
	kfree(clp->cl_principal);
	kfree(clp->cl_name.data);
	kfree(clp);
}

void
release_session_client(struct nfsd4_session *session)
{
	struct nfs4_client *clp = session->se_client;

	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
		return;
	if (is_client_expired(clp)) {
		free_client(clp);
		session->se_client = NULL;
	} else
		renew_client_locked(clp);
	spin_unlock(&client_lock);
}

/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	mark_client_expired(clp);
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
expire_client(struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
		release_openowner(sop);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	list_del(&clp->cl_strhash);
	spin_lock(&client_lock);
	unhash_client_locked(clp);
	if (atomic_read(&clp->cl_refcount) == 0)
		free_client(clp);
	spin_unlock(&client_lock);
}
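/*
 * Illustrative note (added, hedged): expire_client() moves the client's
 * delegations onto a private reaplist while holding recall_lock, then
 * drops the lock before calling unhash_delegation(), which itself takes
 * recall_lock; splitting the walk this way avoids recursive locking
 * while still snapshotting the delegation list atomically.
 */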
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static void copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

/* XXX what about NGROUP */
static int
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	return cr1->cr_uid == cr2->cr_uid;
}

static void gen_clid(struct nfs4_client *clp)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = boot_time;
	clp->cl_clientid.cl_id = current_clientid++;
}

static void gen_confirm(struct nfs4_client *clp)
{
	static u32 i;
	u32 *p;

	p = (u32 *)clp->cl_confirm.data;
	*p++ = get_seconds();
	*p++ = i++;
}

static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	char *princ;

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	INIT_LIST_HEAD(&clp->cl_sessions);

	princ = svc_gss_principal(rqstp);
	if (princ) {
		clp->cl_principal = kstrdup(princ, GFP_KERNEL);
		if (clp->cl_principal == NULL) {
			free_client(clp);
			return NULL;
		}
	}

	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_strhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	spin_lock_init(&clp->cl_lock);
	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_flavor = rqstp->rq_flavor;
	copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	return clp;
}

static int check_name(struct xdr_netobj name)
{
	if (name.len == 0)
		return 0;
	if (name.len > NFS4_OPAQUE_LIMIT) {
		dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
		return 0;
	}
	return 1;
}

static void
add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
{
	unsigned int idhashval;

	list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}
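/*
 * Illustrative sketch (added, hedged) of the two-step client
 * establishment these helpers implement:
 *
 *	SETCLIENTID          -> add_to_unconfirmed(clp, hash);
 *	SETCLIENTID_CONFIRM  -> move_to_confirmed(clp);
 *
 * A client sits in the unconf_* tables until the confirm step proves
 * the caller holds the confirm verifier we generated.
 */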
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	unsigned int strhashval;

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
	strhashval = clientstr_hashval(clp->cl_recdir);
	list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
	renew_client(clp);
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid))
			return clp;
	}
	return NULL;
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid))
			return clp;
	}
	return NULL;
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_str(const char *dname, unsigned int hashval)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
		if (same_name(clp->cl_recdir, dname))
			return clp;
	}
	return NULL;
}

static struct nfs4_client *
find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
		if (same_name(clp->cl_recdir, dname))
			return clp;
	}
	return NULL;
}

static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
{
	switch (family) {
	case AF_INET:
		((struct sockaddr_in *)sa)->sin_family = AF_INET;
		((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
		return;
	case AF_INET6:
		((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
		((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
		return;
	}
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
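/*
 * Illustrative note (added, hedged): the callback address arrives as a
 * universal address string; for tcp, e.g. "10.1.2.3.0.111" would mean
 * IPv4 address 10.1.2.3 with port 0 * 256 + 111 = 111.
 * rpc_uaddr2sockaddr() above performs that conversion.
 */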
/*
 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
 */
void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
	base = (char *)resp->cstate.datap -
		(char *)resp->xbuf->head[0].iov_base;
	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
				    slot->sl_datalen))
		WARN("%s: sessions DRC could not cache compound\n", __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
		resp->opcnt, resp->cstate.slot->sl_cachethis);

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && slot->sl_cachethis == 0) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}
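/*
 * Illustrative note (added, hedged): on replay, only the bytes after
 * the initial SEQUENCE op come from the slot cache; SEQUENCE itself is
 * re-encoded from live slot/session state.  If the original compound
 * was not cached (sl_cachethis == 0), the second op is answered with
 * nfserr_retry_uncached_rep instead of replaying stale data.
 */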
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	int status;
	unsigned int strhashval;
	char dname[HEXDIR_LEN];
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_SSV:
		return nfserr_serverfault;
	default:
		BUG();				/* checked by xdr code */
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	status = nfs4_make_rec_clidname(dname, &exid->clname);

	if (status)
		goto error;

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	status = nfs_ok;

	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		if (!clp_used_exchangeid(conf)) {
			status = nfserr_clid_inuse; /* XXX: ? */
			goto out;
		}
		if (!same_verf(&verf, &conf->cl_verifier)) {
			/* 18.35.4 case 8 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_not_same;
				goto out;
			}
			/* Client reboot: destroy old state */
			expire_client(conf);
			goto out_new;
		}
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			/* 18.35.4 case 9 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_perm;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		/*
		 * Set bit when the owner id and verifier map to an already
		 * confirmed client id (18.35.3).
		 */
		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;

		/*
		 * Falling into 18.35.4 case 2, possible router replay.
		 * Leave confirmed record intact and return same result.
		 */
		copy_verf(conf, &verf);
		new = conf;
		goto out_copy;
	}

	/* 18.35.4 case 7 */
	if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	if (unconf) {
		/*
		 * Possible retry or client restart.  Per 18.35.4 case 4,
		 * a new unconfirmed record should be generated regardless
		 * of whether any properties have changed.
		 */
		expire_client(unconf);
	}

out_new:
	/* Normal case */
	new = create_client(exid->clname, dname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	gen_clid(new);
	add_to_unconfirmed(new, strhashval);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
error:
	dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
	return status;
}

static int
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Normal */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	/* Replay */
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	/* Wraparound */
	if (seqid == 1 && (slot_seqid + 1) == 0)
		return nfs_ok;
	/* Misordered replay or misordered new request */
	return nfserr_seq_misordered;
}
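/*
 * Illustrative decision table (added, hedged) for check_slot_seqid(),
 * given the slot's recorded slot_seqid:
 *
 *	seqid == slot_seqid + 1		-> nfs_ok (new request)
 *	seqid == slot_seqid, in use	-> nfserr_jukebox (retry later)
 *	seqid == slot_seqid, idle	-> nfserr_replay_cache
 *	seqid == 1, slot_seqid == ~0U	-> nfs_ok (u32 wraparound)
 *	anything else			-> nfserr_seq_misordered
 */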
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, int nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_clid_slot *cs_slot = NULL;
	bool confirm_me = false;
	int status = 0;

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid);
	conf = find_confirmed_client(&cr_ses->clientid);

	if (conf) {
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			dprintk("Got a create_session replay! seqid= %d\n",
				cs_slot->sl_seqid);
			/* Return the cached reply status */
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			dprintk("Sequence misordered!\n");
			dprintk("Expected seqid= %d but got seqid= %d\n",
				cs_slot->sl_seqid, cr_ses->seqid);
			goto out;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out;
		}

		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out;
		}

		confirm_me = true;
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out;
	}

	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	conf->cl_minorversion = 1;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	status = nfserr_jukebox;
	new = alloc_init_session(rqstp, conf, cr_ses);
	if (!new)
		goto out;
	status = nfs_ok;
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	if (confirm_me)
		move_to_confirmed(conf);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}

static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	return argp->opcnt == resp->opcnt;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
	/*
	 * Sorta weird: we only need the refcnt'ing because new_conn
	 * acquires client_lock itself:
	 */
	if (cstate->session) {
		nfsd4_get_session(cstate->session);
		atomic_inc(&cstate->session->se_client->cl_refcount);
	}
	spin_unlock(&client_lock);
	if (!cstate->session)
		return nfserr_badsession;

	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (!status)
		nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
	return status;
}
static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return 0;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	u32 status = nfserr_badsession;

	/* Notes:
	 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessionid
	 * - Should we return nfserr_back_chan_busy if waiting for
	 *   callbacks on to-be-destroyed session?
	 * - Do we need to clear any callback info from previous session?
	 */

	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			return nfserr_not_only_op;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
	if (!ses) {
		spin_unlock(&client_lock);
		goto out;
	}

	unhash_session(ses);
	spin_unlock(&client_lock);

	nfs4_lock_state();
	nfsd4_probe_callback_sync(ses->se_client);
	nfs4_unlock_state();

	nfsd4_del_conns(ses);

	nfsd4_put_session(ses);
	status = nfs_ok;
out:
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c) {
		spin_unlock(&clp->cl_lock);
		free_conn(new);
		return;
	}
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_session *session;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	int status;

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&client_lock);
	status = nfserr_badsession;
	session = find_in_sessionid_hashtbl(&seq->sessionid);
	if (!session)
		goto out;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
	if (status == nfserr_replay_cache) {
		cstate->slot = slot;
		cstate->session = session;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out;

	nfsd4_sequence_check_conn(conn, session);
	conn = NULL;

	/* Success! bump slot seqid */
	slot->sl_inuse = true;
	slot->sl_seqid = seq->seqid;
	slot->sl_cachethis = seq->cachethis;

	cstate->slot = slot;
	cstate->session = session;

out:
	/* Hold a session reference until done processing the compound. */
	if (cstate->session) {
		struct nfs4_client *clp = session->se_client;

		nfsd4_get_session(cstate->session);
		atomic_inc(&clp->cl_refcount);
		if (clp->cl_cb_state == NFSD4_CB_DOWN)
			seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
	}
	kfree(conn);
	spin_unlock(&client_lock);
	dprintk("%s: return %d\n", __func__, ntohl(status));
	return status;
}
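/*
 * Illustrative note (added, hedged): nfsd4_sequence() allocates conn up
 * front because it cannot allocate while client_lock is held; on
 * success nfsd4_sequence_check_conn() either hashes the new conn or
 * frees it, and the caller then clears its local pointer, so the
 * kfree(conn) on the way out only releases an allocation that was
 * never consumed.
 */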
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	int status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	nfs4_lock_state();
	status = nfserr_complete_already;
	if (cstate->session->se_client->cl_firststate)
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_create_clid_dir(cstate->session->se_client);
out:
	nfs4_unlock_state();
	return status;
}

__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj clname = {
		.len = setclid->se_namelen,
		.data = setclid->se_name,
	};
	nfs4_verifier clverifier = setclid->se_verf;
	unsigned int strhashval;
	struct nfs4_client *conf, *unconf, *new;
	__be32 status;
	char dname[HEXDIR_LEN];

	if (!check_name(clname))
		return nfserr_inval;

	status = nfs4_make_rec_clidname(dname, &clname);
	if (status)
		return status;

	/*
	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
	 * We get here on a DRC miss.
	 */

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		/* RFC 3530 14.2.33 CASE 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/*
	 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
	 * has a description of SETCLIENTID request processing consisting
	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
	 */
	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	status = nfserr_resource;
	if (!conf) {
		/*
		 * RFC 3530 14.2.33 CASE 4:
		 * placed first, because it is the normal case
		 */
		if (unconf)
			expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else if (same_verf(&conf->cl_verifier, &clverifier)) {
		/*
		 * RFC 3530 14.2.33 CASE 1:
		 * probable callback update
		 */
		if (unconf) {
			/* Note this is removing unconfirmed {*x***},
			 * which is stronger than RFC recommended {vxc**}.
			 * This has the advantage that there is at most
			 * one {*x***} in either list at any time.
			 */
			expire_client(unconf);
		}
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		copy_clid(new, conf);
	} else if (!unconf) {
		/*
		 * RFC 3530 14.2.33 CASE 2:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else {
		/*
		 * RFC 3530 14.2.33 CASE 3:
		 * probable client reboot; state will be removed if
		 * confirmed.
1955 */ 1956 expire_client(unconf); 1957 new = create_client(clname, dname, rqstp, &clverifier); 1958 if (new == NULL) 1959 goto out; 1960 gen_clid(new); 1961 } 1962 /* 1963 * XXX: we should probably set this at creation time, and check 1964 * for consistent minorversion use throughout: 1965 */ 1966 new->cl_minorversion = 0; 1967 gen_callback(new, setclid, rqstp); 1968 add_to_unconfirmed(new, strhashval); 1969 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 1970 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 1971 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 1972 status = nfs_ok; 1973 out: 1974 nfs4_unlock_state(); 1975 return status; 1976 } 1977 1978 1979 /* 1980 * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has 1981 * a description of SETCLIENTID_CONFIRM request processing consisting of 4 1982 * bullets, labeled as CASE1 - CASE4 below. 1983 */ 1984 __be32 1985 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 1986 struct nfsd4_compound_state *cstate, 1987 struct nfsd4_setclientid_confirm *setclientid_confirm) 1988 { 1989 struct sockaddr *sa = svc_addr(rqstp); 1990 struct nfs4_client *conf, *unconf; 1991 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 1992 clientid_t * clid = &setclientid_confirm->sc_clientid; 1993 __be32 status; 1994 1995 if (STALE_CLIENTID(clid)) 1996 return nfserr_stale_clientid; 1997 /* 1998 * XXX The Duplicate Request Cache (DRC) has been checked (??) 1999 * We get here on a DRC miss. 2000 */ 2001 2002 nfs4_lock_state(); 2003 2004 conf = find_confirmed_client(clid); 2005 unconf = find_unconfirmed_client(clid); 2006 2007 status = nfserr_clid_inuse; 2008 if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa)) 2009 goto out; 2010 if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa)) 2011 goto out; 2012 2013 /* 2014 * section 14.2.34 of RFC 3530 has a description of 2015 * SETCLIENTID_CONFIRM request processing consisting 2016 * of 4 bullet points, labeled as CASE1 - CASE4 below. 2017 */ 2018 if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) { 2019 /* 2020 * RFC 3530 14.2.34 CASE 1: 2021 * callback update 2022 */ 2023 if (!same_creds(&conf->cl_cred, &unconf->cl_cred)) 2024 status = nfserr_clid_inuse; 2025 else { 2026 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 2027 nfsd4_probe_callback(conf); 2028 expire_client(unconf); 2029 status = nfs_ok; 2030 2031 } 2032 } else if (conf && !unconf) { 2033 /* 2034 * RFC 3530 14.2.34 CASE 2: 2035 * probable retransmitted request; play it safe and 2036 * do nothing. 
2037 */ 2038 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) 2039 status = nfserr_clid_inuse; 2040 else 2041 status = nfs_ok; 2042 } else if (!conf && unconf 2043 && same_verf(&unconf->cl_confirm, &confirm)) { 2044 /* 2045 * RFC 3530 14.2.34 CASE 3: 2046 * Normal case; new or rebooted client: 2047 */ 2048 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { 2049 status = nfserr_clid_inuse; 2050 } else { 2051 unsigned int hash = 2052 clientstr_hashval(unconf->cl_recdir); 2053 conf = find_confirmed_client_by_str(unconf->cl_recdir, 2054 hash); 2055 if (conf) { 2056 nfsd4_remove_clid_dir(conf); 2057 expire_client(conf); 2058 } 2059 move_to_confirmed(unconf); 2060 conf = unconf; 2061 nfsd4_probe_callback(conf); 2062 status = nfs_ok; 2063 } 2064 } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm))) 2065 && (!unconf || (unconf && !same_verf(&unconf->cl_confirm, 2066 &confirm)))) { 2067 /* 2068 * RFC 3530 14.2.34 CASE 4: 2069 * Client probably hasn't noticed that we rebooted yet. 2070 */ 2071 status = nfserr_stale_clientid; 2072 } else { 2073 /* check that we have hit one of the cases...*/ 2074 status = nfserr_clid_inuse; 2075 } 2076 out: 2077 nfs4_unlock_state(); 2078 return status; 2079 } 2080 2081 /* OPEN Share state helper functions */ 2082 static inline struct nfs4_file * 2083 alloc_init_file(struct inode *ino) 2084 { 2085 struct nfs4_file *fp; 2086 unsigned int hashval = file_hashval(ino); 2087 2088 fp = kmem_cache_alloc(file_slab, GFP_KERNEL); 2089 if (fp) { 2090 atomic_set(&fp->fi_ref, 1); 2091 INIT_LIST_HEAD(&fp->fi_hash); 2092 INIT_LIST_HEAD(&fp->fi_stateids); 2093 INIT_LIST_HEAD(&fp->fi_delegations); 2094 fp->fi_inode = igrab(ino); 2095 fp->fi_id = current_fileid++; 2096 fp->fi_had_conflict = false; 2097 fp->fi_lease = NULL; 2098 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 2099 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 2100 spin_lock(&recall_lock); 2101 list_add(&fp->fi_hash, &file_hashtbl[hashval]); 2102 spin_unlock(&recall_lock); 2103 return fp; 2104 } 2105 return NULL; 2106 } 2107 2108 static void 2109 nfsd4_free_slab(struct kmem_cache **slab) 2110 { 2111 if (*slab == NULL) 2112 return; 2113 kmem_cache_destroy(*slab); 2114 *slab = NULL; 2115 } 2116 2117 void 2118 nfsd4_free_slabs(void) 2119 { 2120 nfsd4_free_slab(&stateowner_slab); 2121 nfsd4_free_slab(&file_slab); 2122 nfsd4_free_slab(&stateid_slab); 2123 nfsd4_free_slab(&deleg_slab); 2124 } 2125 2126 static int 2127 nfsd4_init_slabs(void) 2128 { 2129 stateowner_slab = kmem_cache_create("nfsd4_stateowners", 2130 sizeof(struct nfs4_stateowner), 0, 0, NULL); 2131 if (stateowner_slab == NULL) 2132 goto out_nomem; 2133 file_slab = kmem_cache_create("nfsd4_files", 2134 sizeof(struct nfs4_file), 0, 0, NULL); 2135 if (file_slab == NULL) 2136 goto out_nomem; 2137 stateid_slab = kmem_cache_create("nfsd4_stateids", 2138 sizeof(struct nfs4_stateid), 0, 0, NULL); 2139 if (stateid_slab == NULL) 2140 goto out_nomem; 2141 deleg_slab = kmem_cache_create("nfsd4_delegations", 2142 sizeof(struct nfs4_delegation), 0, 0, NULL); 2143 if (deleg_slab == NULL) 2144 goto out_nomem; 2145 return 0; 2146 out_nomem: 2147 nfsd4_free_slabs(); 2148 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 2149 return -ENOMEM; 2150 } 2151 2152 void 2153 nfs4_free_stateowner(struct kref *kref) 2154 { 2155 struct nfs4_stateowner *sop = 2156 container_of(kref, struct nfs4_stateowner, so_ref); 2157 kfree(sop->so_owner.data); 2158 kmem_cache_free(stateowner_slab, sop); 2159 } 2160 2161 static inline struct nfs4_stateowner * 2162 
alloc_stateowner(struct xdr_netobj *owner) 2163 { 2164 struct nfs4_stateowner *sop; 2165 2166 if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) { 2167 if ((sop->so_owner.data = kmalloc(owner->len, GFP_KERNEL))) { 2168 memcpy(sop->so_owner.data, owner->data, owner->len); 2169 sop->so_owner.len = owner->len; 2170 kref_init(&sop->so_ref); 2171 return sop; 2172 } 2173 kmem_cache_free(stateowner_slab, sop); 2174 } 2175 return NULL; 2176 } 2177 2178 static struct nfs4_stateowner * 2179 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { 2180 struct nfs4_stateowner *sop; 2181 struct nfs4_replay *rp; 2182 unsigned int idhashval; 2183 2184 if (!(sop = alloc_stateowner(&open->op_owner))) 2185 return NULL; 2186 idhashval = ownerid_hashval(current_ownerid); 2187 INIT_LIST_HEAD(&sop->so_idhash); 2188 INIT_LIST_HEAD(&sop->so_strhash); 2189 INIT_LIST_HEAD(&sop->so_perclient); 2190 INIT_LIST_HEAD(&sop->so_stateids); 2191 INIT_LIST_HEAD(&sop->so_perstateid); /* not used */ 2192 INIT_LIST_HEAD(&sop->so_close_lru); 2193 sop->so_time = 0; 2194 list_add(&sop->so_idhash, &ownerid_hashtbl[idhashval]); 2195 list_add(&sop->so_strhash, &ownerstr_hashtbl[strhashval]); 2196 list_add(&sop->so_perclient, &clp->cl_openowners); 2197 sop->so_is_open_owner = 1; 2198 sop->so_id = current_ownerid++; 2199 sop->so_client = clp; 2200 sop->so_seqid = open->op_seqid; 2201 sop->so_confirmed = 0; 2202 rp = &sop->so_replay; 2203 rp->rp_status = nfserr_serverfault; 2204 rp->rp_buflen = 0; 2205 rp->rp_buf = rp->rp_ibuf; 2206 return sop; 2207 } 2208 2209 static inline void 2210 init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 2211 struct nfs4_stateowner *sop = open->op_stateowner; 2212 unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id); 2213 2214 INIT_LIST_HEAD(&stp->st_hash); 2215 INIT_LIST_HEAD(&stp->st_perstateowner); 2216 INIT_LIST_HEAD(&stp->st_lockowners); 2217 INIT_LIST_HEAD(&stp->st_perfile); 2218 list_add(&stp->st_hash, &stateid_hashtbl[hashval]); 2219 list_add(&stp->st_perstateowner, &sop->so_stateids); 2220 list_add(&stp->st_perfile, &fp->fi_stateids); 2221 stp->st_stateowner = sop; 2222 get_nfs4_file(fp); 2223 stp->st_file = fp; 2224 stp->st_stateid.si_boot = boot_time; 2225 stp->st_stateid.si_stateownerid = sop->so_id; 2226 stp->st_stateid.si_fileid = fp->fi_id; 2227 stp->st_stateid.si_generation = 0; 2228 stp->st_access_bmap = 0; 2229 stp->st_deny_bmap = 0; 2230 __set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK, 2231 &stp->st_access_bmap); 2232 __set_bit(open->op_share_deny, &stp->st_deny_bmap); 2233 stp->st_openstp = NULL; 2234 } 2235 2236 static void 2237 move_to_close_lru(struct nfs4_stateowner *sop) 2238 { 2239 dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop); 2240 2241 list_move_tail(&sop->so_close_lru, &close_lru); 2242 sop->so_time = get_seconds(); 2243 } 2244 2245 static int 2246 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, 2247 clientid_t *clid) 2248 { 2249 return (sop->so_owner.len == owner->len) && 2250 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && 2251 (sop->so_client->cl_clientid.cl_id == clid->cl_id); 2252 } 2253 2254 static struct nfs4_stateowner * 2255 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open) 2256 { 2257 struct nfs4_stateowner *so = NULL; 2258 2259 list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) { 2260 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) 2261 return so; 2262 } 2263 return 
NULL; 2264 } 2265 2266 /* search file_hashtbl[] for file */ 2267 static struct nfs4_file * 2268 find_file(struct inode *ino) 2269 { 2270 unsigned int hashval = file_hashval(ino); 2271 struct nfs4_file *fp; 2272 2273 spin_lock(&recall_lock); 2274 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 2275 if (fp->fi_inode == ino) { 2276 get_nfs4_file(fp); 2277 spin_unlock(&recall_lock); 2278 return fp; 2279 } 2280 } 2281 spin_unlock(&recall_lock); 2282 return NULL; 2283 } 2284 2285 static inline int access_valid(u32 x, u32 minorversion) 2286 { 2287 if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ) 2288 return 0; 2289 if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH) 2290 return 0; 2291 x &= ~NFS4_SHARE_ACCESS_MASK; 2292 if (minorversion && x) { 2293 if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL) 2294 return 0; 2295 if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED) 2296 return 0; 2297 x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK); 2298 } 2299 if (x) 2300 return 0; 2301 return 1; 2302 } 2303 2304 static inline int deny_valid(u32 x) 2305 { 2306 /* Note: unlike access bits, deny bits may be zero. */ 2307 return x <= NFS4_SHARE_DENY_BOTH; 2308 } 2309 2310 /* 2311 * Called to check deny when READ with all zero stateid or 2312 * WRITE with all zero or all one stateid 2313 */ 2314 static __be32 2315 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 2316 { 2317 struct inode *ino = current_fh->fh_dentry->d_inode; 2318 struct nfs4_file *fp; 2319 struct nfs4_stateid *stp; 2320 __be32 ret; 2321 2322 dprintk("NFSD: nfs4_share_conflict\n"); 2323 2324 fp = find_file(ino); 2325 if (!fp) 2326 return nfs_ok; 2327 ret = nfserr_locked; 2328 /* Search for conflicting share reservations */ 2329 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 2330 if (test_bit(deny_type, &stp->st_deny_bmap) || 2331 test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap)) 2332 goto out; 2333 } 2334 ret = nfs_ok; 2335 out: 2336 put_nfs4_file(fp); 2337 return ret; 2338 } 2339 2340 static inline void 2341 nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) 2342 { 2343 if (share_access & NFS4_SHARE_ACCESS_WRITE) 2344 nfs4_file_put_access(fp, O_WRONLY); 2345 if (share_access & NFS4_SHARE_ACCESS_READ) 2346 nfs4_file_put_access(fp, O_RDONLY); 2347 } 2348 2349 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 2350 { 2351 /* We're assuming the state code never drops its reference 2352 * without first removing the lease. Since we're in this lease 2353 * callback (and since the lease code is serialized by the kernel 2354 * lock) we know the server hasn't removed the lease yet, we know 2355 * it's safe to take a reference: */ 2356 atomic_inc(&dp->dl_count); 2357 2358 list_add_tail(&dp->dl_recall_lru, &del_recall_lru); 2359 2360 /* only place dl_time is set. protected by lock_flocks*/ 2361 dp->dl_time = get_seconds(); 2362 2363 nfsd4_cb_recall(dp); 2364 } 2365 2366 /* Called from break_lease() with lock_flocks() held. 
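 *
 * Roughly, the recall path is:
 *
 *	break_lease()			(conflicting open or setattr)
 *	  -> fl_break == nfsd_break_deleg_cb()
 *	       -> nfsd_break_one_deleg() for each delegation on the
 *		  file: take a reference, queue it on del_recall_lru
 *		  with dl_time = now, and send an asynchronous
 *		  CB_RECALL (nfsd4_cb_recall())
 *
 * If the delegation is not returned within a lease period, the
 * laundromat (nfs4_laundromat() below) unhashes it.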
*/ 2367 static void nfsd_break_deleg_cb(struct file_lock *fl) 2368 { 2369 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; 2370 struct nfs4_delegation *dp; 2371 2372 BUG_ON(!fp); 2373 /* We assume break_lease is only called once per lease: */ 2374 BUG_ON(fp->fi_had_conflict); 2375 /* 2376 * We don't want the locks code to timeout the lease for us; 2377 * we'll remove it ourself if a delegation isn't returned 2378 * in time: 2379 */ 2380 fl->fl_break_time = 0; 2381 2382 spin_lock(&recall_lock); 2383 fp->fi_had_conflict = true; 2384 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 2385 nfsd_break_one_deleg(dp); 2386 spin_unlock(&recall_lock); 2387 } 2388 2389 static 2390 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) 2391 { 2392 if (arg & F_UNLCK) 2393 return lease_modify(onlist, arg); 2394 else 2395 return -EAGAIN; 2396 } 2397 2398 static const struct lock_manager_operations nfsd_lease_mng_ops = { 2399 .fl_break = nfsd_break_deleg_cb, 2400 .fl_change = nfsd_change_deleg_cb, 2401 }; 2402 2403 2404 __be32 2405 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 2406 struct nfsd4_open *open) 2407 { 2408 clientid_t *clientid = &open->op_clientid; 2409 struct nfs4_client *clp = NULL; 2410 unsigned int strhashval; 2411 struct nfs4_stateowner *sop = NULL; 2412 2413 if (!check_name(open->op_owner)) 2414 return nfserr_inval; 2415 2416 if (STALE_CLIENTID(&open->op_clientid)) 2417 return nfserr_stale_clientid; 2418 2419 strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner); 2420 sop = find_openstateowner_str(strhashval, open); 2421 open->op_stateowner = sop; 2422 if (!sop) { 2423 /* Make sure the client's lease hasn't expired. */ 2424 clp = find_confirmed_client(clientid); 2425 if (clp == NULL) 2426 return nfserr_expired; 2427 goto renew; 2428 } 2429 /* When sessions are used, skip open sequenceid processing */ 2430 if (nfsd4_has_session(cstate)) 2431 goto renew; 2432 if (!sop->so_confirmed) { 2433 /* Replace unconfirmed owners without checking for replay. */ 2434 clp = sop->so_client; 2435 release_openowner(sop); 2436 open->op_stateowner = NULL; 2437 goto renew; 2438 } 2439 if (open->op_seqid == sop->so_seqid - 1) { 2440 if (sop->so_replay.rp_buflen) 2441 return nfserr_replay_me; 2442 /* The original OPEN failed so spectacularly 2443 * that we don't even have replay data saved! 2444 * Therefore, we have no choice but to continue 2445 * processing this OPEN; presumably, we'll 2446 * fail again for the same reason. 
2447 */ 2448 dprintk("nfsd4_process_open1: replay with no replay cache\n"); 2449 goto renew; 2450 } 2451 if (open->op_seqid != sop->so_seqid) 2452 return nfserr_bad_seqid; 2453 renew: 2454 if (open->op_stateowner == NULL) { 2455 sop = alloc_init_open_stateowner(strhashval, clp, open); 2456 if (sop == NULL) 2457 return nfserr_resource; 2458 open->op_stateowner = sop; 2459 } 2460 list_del_init(&sop->so_close_lru); 2461 renew_client(sop->so_client); 2462 return nfs_ok; 2463 } 2464 2465 static inline __be32 2466 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 2467 { 2468 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 2469 return nfserr_openmode; 2470 else 2471 return nfs_ok; 2472 } 2473 2474 static struct nfs4_delegation * 2475 find_delegation_file(struct nfs4_file *fp, stateid_t *stid) 2476 { 2477 struct nfs4_delegation *dp; 2478 2479 spin_lock(&recall_lock); 2480 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 2481 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) { 2482 spin_unlock(&recall_lock); 2483 return dp; 2484 } 2485 spin_unlock(&recall_lock); 2486 return NULL; 2487 } 2488 2489 static int share_access_to_flags(u32 share_access) 2490 { 2491 share_access &= ~NFS4_SHARE_WANT_MASK; 2492 2493 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 2494 } 2495 2496 static __be32 2497 nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open, 2498 struct nfs4_delegation **dp) 2499 { 2500 int flags; 2501 __be32 status = nfserr_bad_stateid; 2502 2503 *dp = find_delegation_file(fp, &open->op_delegate_stateid); 2504 if (*dp == NULL) 2505 goto out; 2506 flags = share_access_to_flags(open->op_share_access); 2507 status = nfs4_check_delegmode(*dp, flags); 2508 if (status) 2509 *dp = NULL; 2510 out: 2511 if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR) 2512 return nfs_ok; 2513 if (status) 2514 return status; 2515 open->op_stateowner->so_confirmed = 1; 2516 return nfs_ok; 2517 } 2518 2519 static __be32 2520 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_stateid **stpp) 2521 { 2522 struct nfs4_stateid *local; 2523 __be32 status = nfserr_share_denied; 2524 struct nfs4_stateowner *sop = open->op_stateowner; 2525 2526 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 2527 /* ignore lock owners */ 2528 if (local->st_stateowner->so_is_open_owner == 0) 2529 continue; 2530 /* remember if we have seen this open owner */ 2531 if (local->st_stateowner == sop) 2532 *stpp = local; 2533 /* check for conflicting share reservations */ 2534 if (!test_share(local, open)) 2535 goto out; 2536 } 2537 status = 0; 2538 out: 2539 return status; 2540 } 2541 2542 static inline struct nfs4_stateid * 2543 nfs4_alloc_stateid(void) 2544 { 2545 return kmem_cache_alloc(stateid_slab, GFP_KERNEL); 2546 } 2547 2548 static inline int nfs4_access_to_access(u32 nfs4_access) 2549 { 2550 int flags = 0; 2551 2552 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 2553 flags |= NFSD_MAY_READ; 2554 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 2555 flags |= NFSD_MAY_WRITE; 2556 return flags; 2557 } 2558 2559 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file 2560 *fp, struct svc_fh *cur_fh, u32 nfs4_access) 2561 { 2562 __be32 status; 2563 int oflag = nfs4_access_to_omode(nfs4_access); 2564 int access = nfs4_access_to_access(nfs4_access); 2565 2566 if (!fp->fi_fds[oflag]) { 2567 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, 2568 &fp->fi_fds[oflag]); 2569 if (status) 2570 return status; 2571 } 2572 
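	/*
	 * The struct file opened above is cached in fi_fds[oflag] and
	 * shared by every open of this file in that mode; the access
	 * reference taken below decides when it may finally be closed.
	 * For reference, nfs4_access_to_omode() is defined elsewhere in
	 * this file; a sketch of the assumed mapping, for illustration
	 * only:
	 *
	 *	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	 *	case NFS4_SHARE_ACCESS_READ:	return O_RDONLY;
	 *	case NFS4_SHARE_ACCESS_WRITE:	return O_WRONLY;
	 *	case NFS4_SHARE_ACCESS_BOTH:	return O_RDWR;
	 *	}
	 */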
nfs4_file_get_access(fp, oflag); 2573 2574 return nfs_ok; 2575 } 2576 2577 static __be32 2578 nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp, 2579 struct nfs4_file *fp, struct svc_fh *cur_fh, 2580 struct nfsd4_open *open) 2581 { 2582 struct nfs4_stateid *stp; 2583 __be32 status; 2584 2585 stp = nfs4_alloc_stateid(); 2586 if (stp == NULL) 2587 return nfserr_resource; 2588 2589 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open->op_share_access); 2590 if (status) { 2591 kmem_cache_free(stateid_slab, stp); 2592 return status; 2593 } 2594 *stpp = stp; 2595 return 0; 2596 } 2597 2598 static inline __be32 2599 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 2600 struct nfsd4_open *open) 2601 { 2602 struct iattr iattr = { 2603 .ia_valid = ATTR_SIZE, 2604 .ia_size = 0, 2605 }; 2606 if (!open->op_truncate) 2607 return 0; 2608 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 2609 return nfserr_inval; 2610 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 2611 } 2612 2613 static __be32 2614 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open) 2615 { 2616 u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK; 2617 bool new_access; 2618 __be32 status; 2619 2620 new_access = !test_bit(op_share_access, &stp->st_access_bmap); 2621 if (new_access) { 2622 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, op_share_access); 2623 if (status) 2624 return status; 2625 } 2626 status = nfsd4_truncate(rqstp, cur_fh, open); 2627 if (status) { 2628 if (new_access) { 2629 int oflag = nfs4_access_to_omode(op_share_access); 2630 nfs4_file_put_access(fp, oflag); 2631 } 2632 return status; 2633 } 2634 /* remember the open */ 2635 __set_bit(op_share_access, &stp->st_access_bmap); 2636 __set_bit(open->op_share_deny, &stp->st_deny_bmap); 2637 2638 return nfs_ok; 2639 } 2640 2641 2642 static void 2643 nfs4_set_claim_prev(struct nfsd4_open *open) 2644 { 2645 open->op_stateowner->so_confirmed = 1; 2646 open->op_stateowner->so_client->cl_firststate = 1; 2647 } 2648 2649 /* Should we give out recallable state?: */ 2650 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 2651 { 2652 if (clp->cl_cb_state == NFSD4_CB_UP) 2653 return true; 2654 /* 2655 * In the sessions case, since we don't have to establish a 2656 * separate connection for callbacks, we assume it's OK 2657 * until we hear otherwise: 2658 */ 2659 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 2660 } 2661 2662 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) 2663 { 2664 struct file_lock *fl; 2665 2666 fl = locks_alloc_lock(); 2667 if (!fl) 2668 return NULL; 2669 locks_init_lock(fl); 2670 fl->fl_lmops = &nfsd_lease_mng_ops; 2671 fl->fl_flags = FL_LEASE; 2672 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; 2673 fl->fl_end = OFFSET_MAX; 2674 fl->fl_owner = (fl_owner_t)(dp->dl_file); 2675 fl->fl_pid = current->tgid; 2676 return fl; 2677 } 2678 2679 static int nfs4_setlease(struct nfs4_delegation *dp, int flag) 2680 { 2681 struct nfs4_file *fp = dp->dl_file; 2682 struct file_lock *fl; 2683 int status; 2684 2685 fl = nfs4_alloc_init_lease(dp, flag); 2686 if (!fl) 2687 return -ENOMEM; 2688 fl->fl_file = find_readable_file(fp); 2689 list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); 2690 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); 2691 if (status) { 2692 list_del_init(&dp->dl_perclnt); 2693 locks_free_lock(fl); 2694 return -ENOMEM; 2695 } 2696 fp->fi_lease = fl; 2697 fp->fi_deleg_file = fl->fl_file; 2698 get_file(fp->fi_deleg_file); 2699 atomic_set(&fp->fi_delegees, 1); 2700 list_add(&dp->dl_perfile, &fp->fi_delegations); 2701 return 0; 2702 } 2703 2704 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) 2705 { 2706 struct nfs4_file *fp = dp->dl_file; 2707 2708 if (!fp->fi_lease) 2709 return nfs4_setlease(dp, flag); 2710 spin_lock(&recall_lock); 2711 if (fp->fi_had_conflict) { 2712 spin_unlock(&recall_lock); 2713 return -EAGAIN; 2714 } 2715 atomic_inc(&fp->fi_delegees); 2716 list_add(&dp->dl_perfile, &fp->fi_delegations); 2717 spin_unlock(&recall_lock); 2718 list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); 2719 return 0; 2720 } 2721 2722 /* 2723 * Attempt to hand out a delegation. 2724 */ 2725 static void 2726 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_stateid *stp) 2727 { 2728 struct nfs4_delegation *dp; 2729 struct nfs4_stateowner *sop = stp->st_stateowner; 2730 int cb_up; 2731 int status, flag = 0; 2732 2733 cb_up = nfsd4_cb_channel_good(sop->so_client); 2734 flag = NFS4_OPEN_DELEGATE_NONE; 2735 open->op_recall = 0; 2736 switch (open->op_claim_type) { 2737 case NFS4_OPEN_CLAIM_PREVIOUS: 2738 if (!cb_up) 2739 open->op_recall = 1; 2740 flag = open->op_delegate_type; 2741 if (flag == NFS4_OPEN_DELEGATE_NONE) 2742 goto out; 2743 break; 2744 case NFS4_OPEN_CLAIM_NULL: 2745 /* Let's not give out any delegations till everyone's 2746 * had the chance to reclaim theirs.... */ 2747 if (locks_in_grace()) 2748 goto out; 2749 if (!cb_up || !sop->so_confirmed) 2750 goto out; 2751 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 2752 flag = NFS4_OPEN_DELEGATE_WRITE; 2753 else 2754 flag = NFS4_OPEN_DELEGATE_READ; 2755 break; 2756 default: 2757 goto out; 2758 } 2759 2760 dp = alloc_init_deleg(sop->so_client, stp, fh, flag); 2761 if (dp == NULL) 2762 goto out_no_deleg; 2763 status = nfs4_set_delegation(dp, flag); 2764 if (status) 2765 goto out_free; 2766 2767 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); 2768 2769 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", 2770 STATEID_VAL(&dp->dl_stateid)); 2771 out: 2772 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS 2773 && flag == NFS4_OPEN_DELEGATE_NONE 2774 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) 2775 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 2776 open->op_delegate_type = flag; 2777 return; 2778 out_free: 2779 nfs4_put_delegation(dp); 2780 out_no_deleg: 2781 flag = NFS4_OPEN_DELEGATE_NONE; 2782 goto out; 2783 } 2784 2785 /* 2786 * called with nfs4_lock_state() held. 
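 *
 * In outline, OPEN processing below is:
 *
 *	find_file(ino), or alloc_init_file(ino) on first open
 *	existing open stateid from this openowner?
 *	  yes -> nfs4_upgrade_open(): take any newly needed file access,
 *		 then OR the share bits into st_access_bmap/st_deny_bmap
 *	  no  -> nfs4_new_open() + init_stateid(): open a struct file
 *		 and hash a fresh stateid
 *	nfsd4_truncate() if requested (its failure fails the OPEN)
 *	nfs4_open_delegation(): possibly hand out a delegation; its
 *	  failure never fails the OPEN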
2787 */ 2788 __be32 2789 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 2790 { 2791 struct nfsd4_compoundres *resp = rqstp->rq_resp; 2792 struct nfs4_file *fp = NULL; 2793 struct inode *ino = current_fh->fh_dentry->d_inode; 2794 struct nfs4_stateid *stp = NULL; 2795 struct nfs4_delegation *dp = NULL; 2796 __be32 status; 2797 2798 status = nfserr_inval; 2799 if (!access_valid(open->op_share_access, resp->cstate.minorversion) 2800 || !deny_valid(open->op_share_deny)) 2801 goto out; 2802 /* 2803 * Lookup file; if found, lookup stateid and check open request, 2804 * and check for delegations in the process of being recalled. 2805 * If not found, create the nfs4_file struct 2806 */ 2807 fp = find_file(ino); 2808 if (fp) { 2809 if ((status = nfs4_check_open(fp, open, &stp))) 2810 goto out; 2811 status = nfs4_check_deleg(fp, open, &dp); 2812 if (status) 2813 goto out; 2814 } else { 2815 status = nfserr_bad_stateid; 2816 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR) 2817 goto out; 2818 status = nfserr_resource; 2819 fp = alloc_init_file(ino); 2820 if (fp == NULL) 2821 goto out; 2822 } 2823 2824 /* 2825 * OPEN the file, or upgrade an existing OPEN. 2826 * If truncate fails, the OPEN fails. 2827 */ 2828 if (stp) { 2829 /* Stateid was found, this is an OPEN upgrade */ 2830 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 2831 if (status) 2832 goto out; 2833 update_stateid(&stp->st_stateid); 2834 } else { 2835 status = nfs4_new_open(rqstp, &stp, fp, current_fh, open); 2836 if (status) 2837 goto out; 2838 init_stateid(stp, fp, open); 2839 status = nfsd4_truncate(rqstp, current_fh, open); 2840 if (status) { 2841 release_open_stateid(stp); 2842 goto out; 2843 } 2844 if (nfsd4_has_session(&resp->cstate)) 2845 update_stateid(&stp->st_stateid); 2846 } 2847 memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t)); 2848 2849 if (nfsd4_has_session(&resp->cstate)) 2850 open->op_stateowner->so_confirmed = 1; 2851 2852 /* 2853 * Attempt to hand out a delegation. No error return, because the 2854 * OPEN succeeds even if we fail. 2855 */ 2856 nfs4_open_delegation(current_fh, open, stp); 2857 2858 status = nfs_ok; 2859 2860 dprintk("%s: stateid=" STATEID_FMT "\n", __func__, 2861 STATEID_VAL(&stp->st_stateid)); 2862 out: 2863 if (fp) 2864 put_nfs4_file(fp); 2865 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 2866 nfs4_set_claim_prev(open); 2867 /* 2868 * To finish the open response, we just need to set the rflags. 2869 */ 2870 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 2871 if (!open->op_stateowner->so_confirmed && 2872 !nfsd4_has_session(&resp->cstate)) 2873 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 2874 2875 return status; 2876 } 2877 2878 __be32 2879 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2880 clientid_t *clid) 2881 { 2882 struct nfs4_client *clp; 2883 __be32 status; 2884 2885 nfs4_lock_state(); 2886 dprintk("process_renew(%08x/%08x): starting\n", 2887 clid->cl_boot, clid->cl_id); 2888 status = nfserr_stale_clientid; 2889 if (STALE_CLIENTID(clid)) 2890 goto out; 2891 clp = find_confirmed_client(clid); 2892 status = nfserr_expired; 2893 if (clp == NULL) { 2894 /* We assume the client took too long to RENEW. 
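 * A client becomes expirable once its last renewal (cl_time, reset
 * by renew_client()) is more than one lease period old; the
 * laundromat below computes cutoff = get_seconds() - nfsd4_lease
 * and reaps clients (and recallable delegations) older than that.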
*/ 2895 dprintk("nfsd4_renew: clientid not found!\n"); 2896 goto out; 2897 } 2898 renew_client(clp); 2899 status = nfserr_cb_path_down; 2900 if (!list_empty(&clp->cl_delegations) 2901 && clp->cl_cb_state != NFSD4_CB_UP) 2902 goto out; 2903 status = nfs_ok; 2904 out: 2905 nfs4_unlock_state(); 2906 return status; 2907 } 2908 2909 static struct lock_manager nfsd4_manager = { 2910 }; 2911 2912 static void 2913 nfsd4_end_grace(void) 2914 { 2915 dprintk("NFSD: end of grace period\n"); 2916 nfsd4_recdir_purge_old(); 2917 locks_end_grace(&nfsd4_manager); 2918 /* 2919 * Now that every NFSv4 client has had the chance to recover and 2920 * to see the (possibly new, possibly shorter) lease time, we 2921 * can safely set the next grace time to the current lease time: 2922 */ 2923 nfsd4_grace = nfsd4_lease; 2924 } 2925 2926 static time_t 2927 nfs4_laundromat(void) 2928 { 2929 struct nfs4_client *clp; 2930 struct nfs4_stateowner *sop; 2931 struct nfs4_delegation *dp; 2932 struct list_head *pos, *next, reaplist; 2933 time_t cutoff = get_seconds() - nfsd4_lease; 2934 time_t t, clientid_val = nfsd4_lease; 2935 time_t u, test_val = nfsd4_lease; 2936 2937 nfs4_lock_state(); 2938 2939 dprintk("NFSD: laundromat service - starting\n"); 2940 if (locks_in_grace()) 2941 nfsd4_end_grace(); 2942 INIT_LIST_HEAD(&reaplist); 2943 spin_lock(&client_lock); 2944 list_for_each_safe(pos, next, &client_lru) { 2945 clp = list_entry(pos, struct nfs4_client, cl_lru); 2946 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 2947 t = clp->cl_time - cutoff; 2948 if (clientid_val > t) 2949 clientid_val = t; 2950 break; 2951 } 2952 if (atomic_read(&clp->cl_refcount)) { 2953 dprintk("NFSD: client in use (clientid %08x)\n", 2954 clp->cl_clientid.cl_id); 2955 continue; 2956 } 2957 unhash_client_locked(clp); 2958 list_add(&clp->cl_lru, &reaplist); 2959 } 2960 spin_unlock(&client_lock); 2961 list_for_each_safe(pos, next, &reaplist) { 2962 clp = list_entry(pos, struct nfs4_client, cl_lru); 2963 dprintk("NFSD: purging unused client (clientid %08x)\n", 2964 clp->cl_clientid.cl_id); 2965 nfsd4_remove_clid_dir(clp); 2966 expire_client(clp); 2967 } 2968 spin_lock(&recall_lock); 2969 list_for_each_safe(pos, next, &del_recall_lru) { 2970 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 2971 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 2972 u = dp->dl_time - cutoff; 2973 if (test_val > u) 2974 test_val = u; 2975 break; 2976 } 2977 list_move(&dp->dl_recall_lru, &reaplist); 2978 } 2979 spin_unlock(&recall_lock); 2980 list_for_each_safe(pos, next, &reaplist) { 2981 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 2982 list_del_init(&dp->dl_recall_lru); 2983 unhash_delegation(dp); 2984 } 2985 test_val = nfsd4_lease; 2986 list_for_each_safe(pos, next, &close_lru) { 2987 sop = list_entry(pos, struct nfs4_stateowner, so_close_lru); 2988 if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) { 2989 u = sop->so_time - cutoff; 2990 if (test_val > u) 2991 test_val = u; 2992 break; 2993 } 2994 dprintk("NFSD: purging unused open stateowner (so_id %d)\n", 2995 sop->so_id); 2996 release_openowner(sop); 2997 } 2998 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT) 2999 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT; 3000 nfs4_unlock_state(); 3001 return clientid_val; 3002 } 3003 3004 static struct workqueue_struct *laundry_wq; 3005 static void laundromat_main(struct work_struct *); 3006 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); 3007 3008 static void 3009 
laundromat_main(struct work_struct *not_used) 3010 { 3011 time_t t; 3012 3013 t = nfs4_laundromat(); 3014 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 3015 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ); 3016 } 3017 3018 static struct nfs4_stateowner * 3019 search_close_lru(u32 st_id, int flags) 3020 { 3021 struct nfs4_stateowner *local = NULL; 3022 3023 if (flags & CLOSE_STATE) { 3024 list_for_each_entry(local, &close_lru, so_close_lru) { 3025 if (local->so_id == st_id) 3026 return local; 3027 } 3028 } 3029 return NULL; 3030 } 3031 3032 static inline int 3033 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp) 3034 { 3035 return fhp->fh_dentry->d_inode != stp->st_file->fi_inode; 3036 } 3037 3038 static int 3039 STALE_STATEID(stateid_t *stateid) 3040 { 3041 if (stateid->si_boot == boot_time) 3042 return 0; 3043 dprintk("NFSD: stale stateid " STATEID_FMT "!\n", 3044 STATEID_VAL(stateid)); 3045 return 1; 3046 } 3047 3048 static inline int 3049 access_permit_read(unsigned long access_bmap) 3050 { 3051 return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) || 3052 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) || 3053 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap); 3054 } 3055 3056 static inline int 3057 access_permit_write(unsigned long access_bmap) 3058 { 3059 return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) || 3060 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap); 3061 } 3062 3063 static 3064 __be32 nfs4_check_openmode(struct nfs4_stateid *stp, int flags) 3065 { 3066 __be32 status = nfserr_openmode; 3067 3068 /* For lock stateid's, we test the parent open, not the lock: */ 3069 if (stp->st_openstp) 3070 stp = stp->st_openstp; 3071 if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap))) 3072 goto out; 3073 if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap))) 3074 goto out; 3075 status = nfs_ok; 3076 out: 3077 return status; 3078 } 3079 3080 static inline __be32 3081 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) 3082 { 3083 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 3084 return nfs_ok; 3085 else if (locks_in_grace()) { 3086 /* Answer in remaining cases depends on existence of 3087 * conflicting state; so we must wait out the grace period. */ 3088 return nfserr_grace; 3089 } else if (flags & WR_STATE) 3090 return nfs4_share_conflict(current_fh, 3091 NFS4_SHARE_DENY_WRITE); 3092 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 3093 return nfs4_share_conflict(current_fh, 3094 NFS4_SHARE_DENY_READ); 3095 } 3096 3097 /* 3098 * Allow READ/WRITE during grace period on recovered state only for files 3099 * that are not able to provide mandatory locking. 3100 */ 3101 static inline int 3102 grace_disallows_io(struct inode *inode) 3103 { 3104 return locks_in_grace() && mandatory_lock(inode); 3105 } 3106 3107 static int check_stateid_generation(stateid_t *in, stateid_t *ref, int flags) 3108 { 3109 /* 3110 * When sessions are used the stateid generation number is ignored 3111 * when it is zero. 3112 */ 3113 if ((flags & HAS_SESSION) && in->si_generation == 0) 3114 goto out; 3115 3116 /* If the client sends us a stateid from the future, it's buggy: */ 3117 if (in->si_generation > ref->si_generation) 3118 return nfserr_bad_stateid; 3119 /* 3120 * The following, however, can happen. For example, if the 3121 * client sends an open and some IO at the same time, the open 3122 * may bump si_generation while the IO is still in flight. 
3123 * Thanks to hard links and renames, the client never knows what 3124 * file an open will affect. So it could avoid that situation 3125 * only by serializing all opens and IO from the same open 3126 * owner. To recover from the old_stateid error, the client 3127 * will just have to retry the IO: 3128 */ 3129 if (in->si_generation < ref->si_generation) 3130 return nfserr_old_stateid; 3131 out: 3132 return nfs_ok; 3133 } 3134 3135 static int is_delegation_stateid(stateid_t *stateid) 3136 { 3137 return stateid->si_fileid == 0; 3138 } 3139 3140 /* 3141 * Checks for stateid operations 3142 */ 3143 __be32 3144 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, 3145 stateid_t *stateid, int flags, struct file **filpp) 3146 { 3147 struct nfs4_stateid *stp = NULL; 3148 struct nfs4_delegation *dp = NULL; 3149 struct svc_fh *current_fh = &cstate->current_fh; 3150 struct inode *ino = current_fh->fh_dentry->d_inode; 3151 __be32 status; 3152 3153 if (filpp) 3154 *filpp = NULL; 3155 3156 if (grace_disallows_io(ino)) 3157 return nfserr_grace; 3158 3159 if (nfsd4_has_session(cstate)) 3160 flags |= HAS_SESSION; 3161 3162 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3163 return check_special_stateids(current_fh, stateid, flags); 3164 3165 status = nfserr_stale_stateid; 3166 if (STALE_STATEID(stateid)) 3167 goto out; 3168 3169 /* 3170 * We assume that any stateid that has the current boot time, 3171 * but that we can't find, is expired: 3172 */ 3173 status = nfserr_expired; 3174 if (is_delegation_stateid(stateid)) { 3175 dp = find_delegation_stateid(ino, stateid); 3176 if (!dp) 3177 goto out; 3178 status = check_stateid_generation(stateid, &dp->dl_stateid, 3179 flags); 3180 if (status) 3181 goto out; 3182 status = nfs4_check_delegmode(dp, flags); 3183 if (status) 3184 goto out; 3185 renew_client(dp->dl_client); 3186 if (filpp) { 3187 *filpp = dp->dl_file->fi_deleg_file; 3188 BUG_ON(!*filpp); 3189 } 3190 } else { /* open or lock stateid */ 3191 stp = find_stateid(stateid, flags); 3192 if (!stp) 3193 goto out; 3194 status = nfserr_bad_stateid; 3195 if (nfs4_check_fh(current_fh, stp)) 3196 goto out; 3197 if (!stp->st_stateowner->so_confirmed) 3198 goto out; 3199 status = check_stateid_generation(stateid, &stp->st_stateid, 3200 flags); 3201 if (status) 3202 goto out; 3203 status = nfs4_check_openmode(stp, flags); 3204 if (status) 3205 goto out; 3206 renew_client(stp->st_stateowner->so_client); 3207 if (filpp) { 3208 if (flags & RD_STATE) 3209 *filpp = find_readable_file(stp->st_file); 3210 else 3211 *filpp = find_writeable_file(stp->st_file); 3212 } 3213 } 3214 status = nfs_ok; 3215 out: 3216 return status; 3217 } 3218 3219 static inline int 3220 setlkflg (int type) 3221 { 3222 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 3223 RD_STATE : WR_STATE; 3224 } 3225 3226 /* 3227 * Checks for sequence id mutating operations. 
3228 */ 3229 static __be32 3230 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 3231 stateid_t *stateid, int flags, 3232 struct nfs4_stateowner **sopp, 3233 struct nfs4_stateid **stpp, struct nfsd4_lock *lock) 3234 { 3235 struct nfs4_stateid *stp; 3236 struct nfs4_stateowner *sop; 3237 struct svc_fh *current_fh = &cstate->current_fh; 3238 __be32 status; 3239 3240 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, 3241 seqid, STATEID_VAL(stateid)); 3242 3243 *stpp = NULL; 3244 *sopp = NULL; 3245 3246 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 3247 dprintk("NFSD: preprocess_seqid_op: magic stateid!\n"); 3248 return nfserr_bad_stateid; 3249 } 3250 3251 if (STALE_STATEID(stateid)) 3252 return nfserr_stale_stateid; 3253 3254 if (nfsd4_has_session(cstate)) 3255 flags |= HAS_SESSION; 3256 3257 /* 3258 * We return BAD_STATEID if filehandle doesn't match stateid, 3259 * the confirmed flag is incorrectly set, or the generation 3260 * number is incorrect. 3261 */ 3262 stp = find_stateid(stateid, flags); 3263 if (stp == NULL) { 3264 /* 3265 * Also, we should make sure this isn't just the result of 3266 * a replayed close: 3267 */ 3268 sop = search_close_lru(stateid->si_stateownerid, flags); 3269 /* It's not stale; let's assume it's expired: */ 3270 if (sop == NULL) 3271 return nfserr_expired; 3272 *sopp = sop; 3273 goto check_replay; 3274 } 3275 3276 *stpp = stp; 3277 *sopp = sop = stp->st_stateowner; 3278 3279 if (lock) { 3280 clientid_t *lockclid = &lock->v.new.clientid; 3281 struct nfs4_client *clp = sop->so_client; 3282 int lkflg = 0; 3283 __be32 status; 3284 3285 lkflg = setlkflg(lock->lk_type); 3286 3287 if (lock->lk_is_new) { 3288 if (!sop->so_is_open_owner) 3289 return nfserr_bad_stateid; 3290 if (!(flags & HAS_SESSION) && 3291 !same_clid(&clp->cl_clientid, lockclid)) 3292 return nfserr_bad_stateid; 3293 /* stp is the open stateid */ 3294 status = nfs4_check_openmode(stp, lkflg); 3295 if (status) 3296 return status; 3297 } else { 3298 /* stp is the lock stateid */ 3299 status = nfs4_check_openmode(stp->st_openstp, lkflg); 3300 if (status) 3301 return status; 3302 } 3303 } 3304 3305 if (nfs4_check_fh(current_fh, stp)) { 3306 dprintk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n"); 3307 return nfserr_bad_stateid; 3308 } 3309 3310 /* 3311 * We now validate the seqid and stateid generation numbers. 3312 * For the moment, we ignore the possibility of 3313 * generation number wraparound. 
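 *
 * Concretely, for a non-session (v4.0) request:
 *
 *	seqid == so_seqid	-> in sequence; proceed
 *	seqid == so_seqid - 1	-> probable retransmission; goto
 *				   check_replay (nfserr_replay_me)
 *	anything else		-> nfserr_bad_seqid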
3314 */ 3315 if (!(flags & HAS_SESSION) && seqid != sop->so_seqid) 3316 goto check_replay; 3317 3318 if (sop->so_confirmed && flags & CONFIRM) { 3319 dprintk("NFSD: preprocess_seqid_op: expected" 3320 " unconfirmed stateowner!\n"); 3321 return nfserr_bad_stateid; 3322 } 3323 if (!sop->so_confirmed && !(flags & CONFIRM)) { 3324 dprintk("NFSD: preprocess_seqid_op: stateowner not" 3325 " confirmed yet!\n"); 3326 return nfserr_bad_stateid; 3327 } 3328 status = check_stateid_generation(stateid, &stp->st_stateid, flags); 3329 if (status) 3330 return status; 3331 renew_client(sop->so_client); 3332 return nfs_ok; 3333 3334 check_replay: 3335 if (seqid == sop->so_seqid - 1) { 3336 dprintk("NFSD: preprocess_seqid_op: retransmission?\n"); 3337 /* indicate replay to calling function */ 3338 return nfserr_replay_me; 3339 } 3340 dprintk("NFSD: preprocess_seqid_op: bad seqid (expected %d, got %d)\n", 3341 sop->so_seqid, seqid); 3342 *sopp = NULL; 3343 return nfserr_bad_seqid; 3344 } 3345 3346 __be32 3347 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3348 struct nfsd4_open_confirm *oc) 3349 { 3350 __be32 status; 3351 struct nfs4_stateowner *sop; 3352 struct nfs4_stateid *stp; 3353 3354 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", 3355 (int)cstate->current_fh.fh_dentry->d_name.len, 3356 cstate->current_fh.fh_dentry->d_name.name); 3357 3358 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 3359 if (status) 3360 return status; 3361 3362 nfs4_lock_state(); 3363 3364 if ((status = nfs4_preprocess_seqid_op(cstate, 3365 oc->oc_seqid, &oc->oc_req_stateid, 3366 CONFIRM | OPEN_STATE, 3367 &oc->oc_stateowner, &stp, NULL))) 3368 goto out; 3369 3370 sop = oc->oc_stateowner; 3371 sop->so_confirmed = 1; 3372 update_stateid(&stp->st_stateid); 3373 memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t)); 3374 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 3375 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stateid)); 3376 3377 nfsd4_create_clid_dir(sop->so_client); 3378 out: 3379 if (oc->oc_stateowner) { 3380 nfs4_get_stateowner(oc->oc_stateowner); 3381 cstate->replay_owner = oc->oc_stateowner; 3382 } 3383 nfs4_unlock_state(); 3384 return status; 3385 } 3386 3387 3388 /* 3389 * unset all bits in union bitmap (bmap) that 3390 * do not exist in share (from successful OPEN_DOWNGRADE) 3391 */ 3392 static void 3393 reset_union_bmap_access(unsigned long access, unsigned long *bmap) 3394 { 3395 int i; 3396 for (i = 1; i < 4; i++) { 3397 if ((i & access) != i) 3398 __clear_bit(i, bmap); 3399 } 3400 } 3401 3402 static void 3403 reset_union_bmap_deny(unsigned long deny, unsigned long *bmap) 3404 { 3405 int i; 3406 for (i = 0; i < 4; i++) { 3407 if ((i & deny) != i) 3408 __clear_bit(i, bmap); 3409 } 3410 } 3411 3412 __be32 3413 nfsd4_open_downgrade(struct svc_rqst *rqstp, 3414 struct nfsd4_compound_state *cstate, 3415 struct nfsd4_open_downgrade *od) 3416 { 3417 __be32 status; 3418 struct nfs4_stateid *stp; 3419 unsigned int share_access; 3420 3421 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 3422 (int)cstate->current_fh.fh_dentry->d_name.len, 3423 cstate->current_fh.fh_dentry->d_name.name); 3424 3425 if (!access_valid(od->od_share_access, cstate->minorversion) 3426 || !deny_valid(od->od_share_deny)) 3427 return nfserr_inval; 3428 3429 nfs4_lock_state(); 3430 if ((status = nfs4_preprocess_seqid_op(cstate, 3431 od->od_seqid, 3432 &od->od_stateid, 3433 OPEN_STATE, 3434 &od->od_stateowner, &stp, NULL))) 3435 goto out; 3436 3437 status = nfserr_inval; 
3438 if (!test_bit(od->od_share_access, &stp->st_access_bmap)) { 3439 dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n", 3440 stp->st_access_bmap, od->od_share_access); 3441 goto out; 3442 } 3443 if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) { 3444 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", 3445 stp->st_deny_bmap, od->od_share_deny); 3446 goto out; 3447 } 3448 set_access(&share_access, stp->st_access_bmap); 3449 nfs4_file_downgrade(stp->st_file, share_access & ~od->od_share_access); 3450 3451 reset_union_bmap_access(od->od_share_access, &stp->st_access_bmap); 3452 reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap); 3453 3454 update_stateid(&stp->st_stateid); 3455 memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t)); 3456 status = nfs_ok; 3457 out: 3458 if (od->od_stateowner) { 3459 nfs4_get_stateowner(od->od_stateowner); 3460 cstate->replay_owner = od->od_stateowner; 3461 } 3462 nfs4_unlock_state(); 3463 return status; 3464 } 3465 3466 /* 3467 * nfs4_unlock_state() called after encode 3468 */ 3469 __be32 3470 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3471 struct nfsd4_close *close) 3472 { 3473 __be32 status; 3474 struct nfs4_stateid *stp; 3475 3476 dprintk("NFSD: nfsd4_close on file %.*s\n", 3477 (int)cstate->current_fh.fh_dentry->d_name.len, 3478 cstate->current_fh.fh_dentry->d_name.name); 3479 3480 nfs4_lock_state(); 3481 /* check close_lru for replay */ 3482 if ((status = nfs4_preprocess_seqid_op(cstate, 3483 close->cl_seqid, 3484 &close->cl_stateid, 3485 OPEN_STATE | CLOSE_STATE, 3486 &close->cl_stateowner, &stp, NULL))) 3487 goto out; 3488 status = nfs_ok; 3489 update_stateid(&stp->st_stateid); 3490 memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t)); 3491 3492 /* release_stateid() calls nfsd_close() if needed */ 3493 release_open_stateid(stp); 3494 3495 /* place unused nfs4_stateowners on so_close_lru list to be 3496 * released by the laundromat service after the lease period 3497 * to enable us to handle CLOSE replay 3498 */ 3499 if (list_empty(&close->cl_stateowner->so_stateids)) 3500 move_to_close_lru(close->cl_stateowner); 3501 out: 3502 if (close->cl_stateowner) { 3503 nfs4_get_stateowner(close->cl_stateowner); 3504 cstate->replay_owner = close->cl_stateowner; 3505 } 3506 nfs4_unlock_state(); 3507 return status; 3508 } 3509 3510 __be32 3511 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3512 struct nfsd4_delegreturn *dr) 3513 { 3514 struct nfs4_delegation *dp; 3515 stateid_t *stateid = &dr->dr_stateid; 3516 struct inode *inode; 3517 __be32 status; 3518 int flags = 0; 3519 3520 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 3521 return status; 3522 inode = cstate->current_fh.fh_dentry->d_inode; 3523 3524 if (nfsd4_has_session(cstate)) 3525 flags |= HAS_SESSION; 3526 nfs4_lock_state(); 3527 status = nfserr_bad_stateid; 3528 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3529 goto out; 3530 status = nfserr_stale_stateid; 3531 if (STALE_STATEID(stateid)) 3532 goto out; 3533 status = nfserr_bad_stateid; 3534 if (!is_delegation_stateid(stateid)) 3535 goto out; 3536 status = nfserr_expired; 3537 dp = find_delegation_stateid(inode, stateid); 3538 if (!dp) 3539 goto out; 3540 status = check_stateid_generation(stateid, &dp->dl_stateid, flags); 3541 if (status) 3542 goto out; 3543 renew_client(dp->dl_client); 3544 3545 unhash_delegation(dp); 3546 out: 3547 nfs4_unlock_state(); 3548 3549 return status; 3550 } 
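/*
 * A note on the DELEGRETURN checks above, in order: the special
 * all-zero/all-one stateids are rejected first, then stale
 * (pre-reboot) stateids, then anything that is not a delegation
 * stateid (which, by the convention in is_delegation_stateid(), has
 * si_fileid == 0); a delegation stateid from the current boot that
 * cannot be found is assumed to have expired.  Only then is the
 * generation checked and the delegation unhashed.
 */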
3551 3552 3553 /* 3554 * Lock owner state (byte-range locks) 3555 */ 3556 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) 3557 #define LOCK_HASH_BITS 8 3558 #define LOCK_HASH_SIZE (1 << LOCK_HASH_BITS) 3559 #define LOCK_HASH_MASK (LOCK_HASH_SIZE - 1) 3560 3561 static inline u64 3562 end_offset(u64 start, u64 len) 3563 { 3564 u64 end; 3565 3566 end = start + len; 3567 return end >= start ? end: NFS4_MAX_UINT64; 3568 } 3569 3570 /* last octet in a range */ 3571 static inline u64 3572 last_byte_offset(u64 start, u64 len) 3573 { 3574 u64 end; 3575 3576 BUG_ON(!len); 3577 end = start + len; 3578 return end > start ? end - 1: NFS4_MAX_UINT64; 3579 } 3580 3581 #define lockownerid_hashval(id) \ 3582 ((id) & LOCK_HASH_MASK) 3583 3584 static inline unsigned int 3585 lock_ownerstr_hashval(struct inode *inode, u32 cl_id, 3586 struct xdr_netobj *ownername) 3587 { 3588 return (file_hashval(inode) + cl_id 3589 + opaque_hashval(ownername->data, ownername->len)) 3590 & LOCK_HASH_MASK; 3591 } 3592 3593 static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE]; 3594 static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE]; 3595 static struct list_head lockstateid_hashtbl[STATEID_HASH_SIZE]; 3596 3597 static struct nfs4_stateid * 3598 find_stateid(stateid_t *stid, int flags) 3599 { 3600 struct nfs4_stateid *local; 3601 u32 st_id = stid->si_stateownerid; 3602 u32 f_id = stid->si_fileid; 3603 unsigned int hashval; 3604 3605 dprintk("NFSD: find_stateid flags 0x%x\n",flags); 3606 if (flags & (LOCK_STATE | RD_STATE | WR_STATE)) { 3607 hashval = stateid_hashval(st_id, f_id); 3608 list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) { 3609 if ((local->st_stateid.si_stateownerid == st_id) && 3610 (local->st_stateid.si_fileid == f_id)) 3611 return local; 3612 } 3613 } 3614 3615 if (flags & (OPEN_STATE | RD_STATE | WR_STATE)) { 3616 hashval = stateid_hashval(st_id, f_id); 3617 list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) { 3618 if ((local->st_stateid.si_stateownerid == st_id) && 3619 (local->st_stateid.si_fileid == f_id)) 3620 return local; 3621 } 3622 } 3623 return NULL; 3624 } 3625 3626 static struct nfs4_delegation * 3627 find_delegation_stateid(struct inode *ino, stateid_t *stid) 3628 { 3629 struct nfs4_file *fp; 3630 struct nfs4_delegation *dl; 3631 3632 dprintk("NFSD: %s: stateid=" STATEID_FMT "\n", __func__, 3633 STATEID_VAL(stid)); 3634 3635 fp = find_file(ino); 3636 if (!fp) 3637 return NULL; 3638 dl = find_delegation_file(fp, stid); 3639 put_nfs4_file(fp); 3640 return dl; 3641 } 3642 3643 /* 3644 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 3645 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 3646 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 3647 * locking, this prevents us from being completely protocol-compliant. The 3648 * real solution to this problem is to start using unsigned file offsets in 3649 * the VFS, but this is a very deep change! 3650 */ 3651 static inline void 3652 nfs4_transform_lock_offset(struct file_lock *lock) 3653 { 3654 if (lock->fl_start < 0) 3655 lock->fl_start = OFFSET_MAX; 3656 if (lock->fl_end < 0) 3657 lock->fl_end = OFFSET_MAX; 3658 } 3659 3660 /* Hack!: For now, we're defining this just so we can use a pointer to it 3661 * as a unique cookie to identify our (NFSv4's) posix locks. 
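 * nfs4_set_lock_denied() below relies on this: when a conflicting
 * lock carries fl_lmops == &nfsd_posix_mng_ops, its fl_owner is known
 * to be one of our struct nfs4_stateowner pointers and is safe to
 * dereference for the owner name and clientid; a conflict posted by
 * any other lock manager gets a NULL ld_sop and a zeroed clientid
 * instead.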
*/ 3662 static const struct lock_manager_operations nfsd_posix_mng_ops = { 3663 }; 3664 3665 static inline void 3666 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 3667 { 3668 struct nfs4_stateowner *sop; 3669 3670 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 3671 sop = (struct nfs4_stateowner *) fl->fl_owner; 3672 kref_get(&sop->so_ref); 3673 deny->ld_sop = sop; 3674 deny->ld_clientid = sop->so_client->cl_clientid; 3675 } else { 3676 deny->ld_sop = NULL; 3677 deny->ld_clientid.cl_boot = 0; 3678 deny->ld_clientid.cl_id = 0; 3679 } 3680 deny->ld_start = fl->fl_start; 3681 deny->ld_length = NFS4_MAX_UINT64; 3682 if (fl->fl_end != NFS4_MAX_UINT64) 3683 deny->ld_length = fl->fl_end - fl->fl_start + 1; 3684 deny->ld_type = NFS4_READ_LT; 3685 if (fl->fl_type != F_RDLCK) 3686 deny->ld_type = NFS4_WRITE_LT; 3687 } 3688 3689 static struct nfs4_stateowner * 3690 find_lockstateowner_str(struct inode *inode, clientid_t *clid, 3691 struct xdr_netobj *owner) 3692 { 3693 unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner); 3694 struct nfs4_stateowner *op; 3695 3696 list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) { 3697 if (same_owner_str(op, owner, clid)) 3698 return op; 3699 } 3700 return NULL; 3701 } 3702 3703 /* 3704 * Alloc a lock owner structure. 3705 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 3706 * occurred. 3707 * 3708 * strhashval = lock_ownerstr_hashval 3709 */ 3710 3711 static struct nfs4_stateowner * 3712 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_stateid *open_stp, struct nfsd4_lock *lock) { 3713 struct nfs4_stateowner *sop; 3714 struct nfs4_replay *rp; 3715 unsigned int idhashval; 3716 3717 if (!(sop = alloc_stateowner(&lock->lk_new_owner))) 3718 return NULL; 3719 idhashval = lockownerid_hashval(current_ownerid); 3720 INIT_LIST_HEAD(&sop->so_idhash); 3721 INIT_LIST_HEAD(&sop->so_strhash); 3722 INIT_LIST_HEAD(&sop->so_perclient); 3723 INIT_LIST_HEAD(&sop->so_stateids); 3724 INIT_LIST_HEAD(&sop->so_perstateid); 3725 INIT_LIST_HEAD(&sop->so_close_lru); /* not used */ 3726 sop->so_time = 0; 3727 list_add(&sop->so_idhash, &lock_ownerid_hashtbl[idhashval]); 3728 list_add(&sop->so_strhash, &lock_ownerstr_hashtbl[strhashval]); 3729 list_add(&sop->so_perstateid, &open_stp->st_lockowners); 3730 sop->so_is_open_owner = 0; 3731 sop->so_id = current_ownerid++; 3732 sop->so_client = clp; 3733 /* It is the openowner seqid that will be incremented in encode in the 3734 * case of new lockowners; so increment the lock seqid manually: */ 3735 sop->so_seqid = lock->lk_new_lock_seqid + 1; 3736 sop->so_confirmed = 1; 3737 rp = &sop->so_replay; 3738 rp->rp_status = nfserr_serverfault; 3739 rp->rp_buflen = 0; 3740 rp->rp_buf = rp->rp_ibuf; 3741 return sop; 3742 } 3743 3744 static struct nfs4_stateid * 3745 alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struct nfs4_stateid *open_stp) 3746 { 3747 struct nfs4_stateid *stp; 3748 unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id); 3749 3750 stp = nfs4_alloc_stateid(); 3751 if (stp == NULL) 3752 goto out; 3753 INIT_LIST_HEAD(&stp->st_hash); 3754 INIT_LIST_HEAD(&stp->st_perfile); 3755 INIT_LIST_HEAD(&stp->st_perstateowner); 3756 INIT_LIST_HEAD(&stp->st_lockowners); /* not used */ 3757 list_add(&stp->st_hash, &lockstateid_hashtbl[hashval]); 3758 list_add(&stp->st_perfile, &fp->fi_stateids); 3759 list_add(&stp->st_perstateowner, &sop->so_stateids); 3760 stp->st_stateowner = sop; 3761 
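	/*
	 * A lock stateid starts with an empty access bitmap (file access
	 * is taken lazily, per lock type, in get_lock_access()), inherits
	 * the deny bitmap of its open, and records that open in
	 * st_openstp, so openmode checks test the parent OPEN rather
	 * than the lock itself (see nfs4_check_openmode()).
	 */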
get_nfs4_file(fp); 3762 stp->st_file = fp; 3763 stp->st_stateid.si_boot = boot_time; 3764 stp->st_stateid.si_stateownerid = sop->so_id; 3765 stp->st_stateid.si_fileid = fp->fi_id; 3766 stp->st_stateid.si_generation = 0; 3767 stp->st_access_bmap = 0; 3768 stp->st_deny_bmap = open_stp->st_deny_bmap; 3769 stp->st_openstp = open_stp; 3770 3771 out: 3772 return stp; 3773 } 3774 3775 static int 3776 check_lock_length(u64 offset, u64 length) 3777 { 3778 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 3779 LOFF_OVERFLOW(offset, length))); 3780 } 3781 3782 static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access) 3783 { 3784 struct nfs4_file *fp = lock_stp->st_file; 3785 int oflag = nfs4_access_to_omode(access); 3786 3787 if (test_bit(access, &lock_stp->st_access_bmap)) 3788 return; 3789 nfs4_file_get_access(fp, oflag); 3790 __set_bit(access, &lock_stp->st_access_bmap); 3791 } 3792 3793 /* 3794 * LOCK operation 3795 */ 3796 __be32 3797 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3798 struct nfsd4_lock *lock) 3799 { 3800 struct nfs4_stateowner *open_sop = NULL; 3801 struct nfs4_stateowner *lock_sop = NULL; 3802 struct nfs4_stateid *lock_stp; 3803 struct nfs4_file *fp; 3804 struct file *filp = NULL; 3805 struct file_lock file_lock; 3806 struct file_lock conflock; 3807 __be32 status = 0; 3808 unsigned int strhashval; 3809 int err; 3810 3811 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 3812 (long long) lock->lk_offset, 3813 (long long) lock->lk_length); 3814 3815 if (check_lock_length(lock->lk_offset, lock->lk_length)) 3816 return nfserr_inval; 3817 3818 if ((status = fh_verify(rqstp, &cstate->current_fh, 3819 S_IFREG, NFSD_MAY_LOCK))) { 3820 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 3821 return status; 3822 } 3823 3824 nfs4_lock_state(); 3825 3826 if (lock->lk_is_new) { 3827 /* 3828 * Client indicates that this is a new lockowner. 3829 * Use open owner and open stateid to create lock owner and 3830 * lock stateid. 3831 */ 3832 struct nfs4_stateid *open_stp = NULL; 3833 3834 status = nfserr_stale_clientid; 3835 if (!nfsd4_has_session(cstate) && 3836 STALE_CLIENTID(&lock->lk_new_clientid)) 3837 goto out; 3838 3839 /* validate and update open stateid and open seqid */ 3840 status = nfs4_preprocess_seqid_op(cstate, 3841 lock->lk_new_open_seqid, 3842 &lock->lk_new_open_stateid, 3843 OPEN_STATE, 3844 &lock->lk_replay_owner, &open_stp, 3845 lock); 3846 if (status) 3847 goto out; 3848 open_sop = lock->lk_replay_owner; 3849 /* create lockowner and lock stateid */ 3850 fp = open_stp->st_file; 3851 strhashval = lock_ownerstr_hashval(fp->fi_inode, 3852 open_sop->so_client->cl_clientid.cl_id, 3853 &lock->v.new.owner); 3854 /* XXX: Do we need to check for duplicate stateowners on 3855 * the same file, or should they just be allowed (and 3856 * create new stateids)? 
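 * (As written, the lk_is_new path in nfsd4_lock() never looks for an
 * existing lockowner -- it always allocates a fresh one -- so
 * duplicates are currently possible.)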
		status = nfserr_resource;
		lock_sop = alloc_init_lock_stateowner(strhashval,
				open_sop->so_client, open_stp, lock);
		if (lock_sop == NULL)
			goto out;
		lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
		if (lock_stp == NULL)
			goto out;
	} else {
		/* lock (lock owner + lock stateid) already exists */
		status = nfs4_preprocess_seqid_op(cstate,
				lock->lk_old_lock_seqid,
				&lock->lk_old_lock_stateid,
				LOCK_STATE,
				&lock->lk_replay_owner, &lock_stp, lock);
		if (status)
			goto out;
		lock_sop = lock->lk_replay_owner;
		fp = lock_stp->st_file;
	}
	/* lock->lk_replay_owner and lock_stp have been created or found */

	status = nfserr_grace;
	if (locks_in_grace() && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace() && lock->lk_reclaim)
		goto out;

	locks_init_lock(&file_lock);
	switch (lock->lk_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		filp = find_readable_file(lock_stp->st_file);
		if (filp)
			get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
		file_lock.fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		filp = find_writeable_file(lock_stp->st_file);
		if (filp)
			get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
		file_lock.fl_type = F_WRLCK;
		break;
	default:
		status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}
	file_lock.fl_owner = (fl_owner_t)lock_sop;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_file = filp;
	file_lock.fl_flags = FL_POSIX;
	file_lock.fl_lmops = &nfsd_posix_mng_ops;

	file_lock.fl_start = lock->lk_offset;
	file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(&file_lock);

	/*
	 * Try to lock the file in the VFS.
	 * Note: locks.c protects the inode's lock list with lock_flocks().
	 */

	err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
	switch (-err) {
	case 0: /* success! */
		update_stateid(&lock_stp->st_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
				sizeof(stateid_t));
		status = 0;
		break;
	case EAGAIN:	/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(&conflock, &lock->lk_denied);
		break;
	case EDEADLK:
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
		status = nfserr_resource;
		break;
	}
out:
	if (status && lock->lk_is_new && lock_sop)
		release_lockowner(lock_sop);
	if (lock->lk_replay_owner) {
		nfs4_get_stateowner(lock->lk_replay_owner);
		cstate->replay_owner = lock->lk_replay_owner;
	}
	nfs4_unlock_state();
	return status;
}

/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably, test_lock should instead be an inode
 * operation.)
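 *
 * Opening for read (NFSD_MAY_READ) is enough even when testing for a
 * write-lock conflict: vfs_test_lock() only examines existing lock
 * state and never does I/O through the struct file.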
 */
static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	int err;

	err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (err)
		return err;
	err = vfs_test_lock(file, lock);
	nfsd_close(file);
	return err;
}

/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct inode *inode;
	struct file_lock file_lock;
	int error;
	__be32 status;

	if (locks_in_grace())
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	lockt->lt_stateowner = NULL;
	nfs4_lock_state();

	status = nfserr_stale_clientid;
	if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
		goto out;

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status) {
		dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
		if (status == nfserr_symlink)
			status = nfserr_inval;
		goto out;
	}

	inode = cstate->current_fh.fh_dentry->d_inode;
	locks_init_lock(&file_lock);
	switch (lockt->lt_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		file_lock.fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		file_lock.fl_type = F_WRLCK;
		break;
	default:
		dprintk("NFSD: nfs4_lockt: bad lock type!\n");
		status = nfserr_inval;
		goto out;
	}

	lockt->lt_stateowner = find_lockstateowner_str(inode,
			&lockt->lt_clientid, &lockt->lt_owner);
	if (lockt->lt_stateowner)
		file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_flags = FL_POSIX;

	file_lock.fl_start = lockt->lt_offset;
	file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(&file_lock);

	status = nfs_ok;
	error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
	if (error) {
		status = nfserrno(error);
		goto out;
	}
	if (file_lock.fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
	}
out:
	nfs4_unlock_state();
	return status;
}

__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_stateid *stp;
	struct file *filp = NULL;
	struct file_lock file_lock;
	__be32 status;
	int err;

	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
			&locku->lu_stateid, LOCK_STATE,
			&locku->lu_stateowner, &stp, NULL);
	if (status)
		goto out;

	filp = find_any_file(stp->st_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto out;
	}
	locks_init_lock(&file_lock);
	file_lock.fl_type = F_UNLCK;
	file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_file = filp;
	file_lock.fl_flags = FL_POSIX;
	file_lock.fl_lmops = &nfsd_posix_mng_ops;
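	/*
	 * fl_owner must match the owner used when the lock was set in
	 * nfsd4_lock(), or the VFS will not find the lock to remove.
	 */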
	file_lock.fl_start = locku->lu_offset;
	file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
	nfs4_transform_lock_offset(&file_lock);

	/*
	 * Try to unlock the file in the VFS.
	 */
	err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	/*
	 * OK, unlock succeeded; the only thing left to do is update the stateid.
	 */
	update_stateid(&stp->st_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t));

out:
	if (locku->lu_stateowner) {
		nfs4_get_stateowner(locku->lu_stateowner);
		cstate->replay_owner = locku->lu_stateowner;
	}
	nfs4_unlock_state();
	return status;

out_nfserr:
	status = nfserrno(err);
	goto out;
}

/*
 * returns
 *	1: locks held by lockowner
 *	0: no locks held by lockowner
 */
static int
check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
{
	struct file_lock **flpp;
	struct inode *inode = filp->fi_inode;
	int status = 0;

	lock_flocks();
	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
			status = 1;
			goto out;
		}
	}
out:
	unlock_flocks();
	return status;
}

__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	struct list_head matches;
	int i;
	__be32 status;

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	/* XXX check for lease expiration */

	status = nfserr_stale_clientid;
	if (STALE_CLIENTID(clid))
		return status;

	nfs4_lock_state();

	status = nfserr_locks_held;
	/* XXX: we're doing a linear search through all the lockowners.
	 * Yipes!  For now we'll just hope clients aren't really using
	 * release_lockowner much, but eventually we have to fix these
	 * data structures. */
	INIT_LIST_HEAD(&matches);
	for (i = 0; i < LOCK_HASH_SIZE; i++) {
		list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) {
			if (!same_owner_str(sop, owner, clid))
				continue;
			list_for_each_entry(stp, &sop->so_stateids,
					st_perstateowner) {
				if (check_for_locks(stp->st_file, sop))
					goto out;
			}
			/* Add each owner to "matches" exactly once;
			 * adding it inside the stateid loop above would
			 * corrupt the list for owners with more than one
			 * stateid.  Note: so_perclient is unused for
			 * lockowners, so it's OK to fool with here. */
			list_add(&sop->so_perclient, &matches);
		}
	}
	/* Clients probably won't expect us to return with some (but not all)
	 * of the lockowner state released; so don't release any until all
	 * have been checked. */
	status = nfs_ok;
	while (!list_empty(&matches)) {
		sop = list_entry(matches.next, struct nfs4_stateowner,
				so_perclient);
		/* unhash_stateowner deletes so_perclient only
		 * for openowners; for a lockowner we must take it
		 * back off "matches" ourselves.
		 */
		list_del(&sop->so_perclient);
		release_lockowner(sop);
	}
out:
	nfs4_unlock_state();
	return status;
}

static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

int
nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
{
	unsigned int strhashval = clientstr_hashval(name);
	struct nfs4_client *clp;

	clp = find_confirmed_client_by_str(name, strhashval);
	return clp ? 1 : 0;
}

/*
 * failure => all reclaim bets are off; the client will get
 * nfserr_no_grace when it tries to recover its state.
 */
int
nfs4_client_to_reclaim(const char *name)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
	if (!crp)
		return 0;
	strhashval = clientstr_hashval(name);
	INIT_LIST_HEAD(&crp->cr_strhash);
	list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
	memcpy(crp->cr_recdir, name, HEXDIR_LEN);
	reclaim_str_hashtbl_size++;
	return 1;
}

static void
nfs4_release_reclaim(void)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&reclaim_str_hashtbl[i])) {
			crp = list_entry(reclaim_str_hashtbl[i].next,
					struct nfs4_client_reclaim, cr_strhash);
			list_del(&crp->cr_strhash);
			kfree(crp);
			reclaim_str_hashtbl_size--;
		}
	}
	BUG_ON(reclaim_str_hashtbl_size);
}

/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
static struct nfs4_client_reclaim *
nfs4_find_reclaim_client(clientid_t *clid)
{
	unsigned int strhashval;
	struct nfs4_client *clp;
	struct nfs4_client_reclaim *crp = NULL;

	/* find clientid in conf_id_hashtbl */
	clp = find_confirmed_client(clid);
	if (clp == NULL)
		return NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
		clp->cl_name.len, clp->cl_name.data,
		clp->cl_recdir);

	/* find clp->cl_name in reclaim_str_hashtbl */
	strhashval = clientstr_hashval(clp->cl_recdir);
	list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, clp->cl_recdir))
			return crp;
	}
	return NULL;
}

/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid)
{
	return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
}
/* initialization to perform at module load time: */

int
nfs4_state_init(void)
{
	int i, status;

	status = nfsd4_init_slabs();
	if (status)
		return status;
	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&conf_str_hashtbl[i]);
		INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
		INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
		INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sessionid_hashtbl[i]);
	for (i = 0; i < FILE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&file_hashtbl[i]);
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
		INIT_LIST_HEAD(&ownerid_hashtbl[i]);
	}
	for (i = 0; i < STATEID_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&stateid_hashtbl[i]);
		INIT_LIST_HEAD(&lockstateid_hashtbl[i]);
	}
	for (i = 0; i < LOCK_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
		INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
	}
	memset(&onestateid, ~0, sizeof(stateid_t));
	INIT_LIST_HEAD(&close_lru);
	INIT_LIST_HEAD(&client_lru);
	INIT_LIST_HEAD(&del_recall_lru);
	reclaim_str_hashtbl_size = 0;
	return 0;
}

static void
nfsd4_load_reboot_recovery_data(void)
{
	int status;

	nfs4_lock_state();
	nfsd4_init_recdir(user_recovery_dirname);
	status = nfsd4_recdir_load();
	nfs4_unlock_state();
	if (status)
		printk(KERN_ERR "NFSD: Failure reading reboot recovery data\n");
}

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 0.6% of memory.
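	 *
	 * The shift below is just that limit in page units:
	 * pages >> (20 - PAGE_SHIFT) is megabytes of RAM, and the extra
	 * "- 2" multiplies by 4.  With 4K pages (PAGE_SHIFT == 12) this
	 * works out to nr_free_buffer_pages() >> 6; e.g. 1GB of free
	 * buffer pages (262144 pages) allows 4096 delegations.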
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

/* initialization to perform when the nfsd service is started: */

static int
__nfs4_state_start(void)
{
	int ret;

	boot_time = get_seconds();
	locks_start_grace(&nfsd4_manager);
	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
	       nfsd4_grace);
	ret = set_callback_cred();
	if (ret)
		return ret;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL)
		return -ENOMEM;
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;
	queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
	set_max_delegations();
	return 0;
out_free_laundry:
	destroy_workqueue(laundry_wq);
	return ret;
}

int
nfs4_state_start(void)
{
	nfsd4_load_reboot_recovery_data();
	return __nfs4_state_start();
}

static void
__nfs4_state_shutdown(void)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&conf_id_hashtbl[i])) {
			clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			expire_client(clp);
		}
		while (!list_empty(&unconf_str_hashtbl[i])) {
			clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
			expire_client(clp);
		}
	}
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}

	nfsd4_shutdown_recdir();
}

void
nfs4_state_shutdown(void)
{
	cancel_delayed_work_sync(&laundromat_work);
	destroy_workqueue(laundry_wq);
	locks_end_grace(&nfsd4_manager);
	nfs4_lock_state();
	nfs4_release_reclaim();
	__nfs4_state_shutdown();
	nfs4_unlock_state();
	nfsd4_destroy_callback_queue();
}

/*
 * user_recovery_dirname is protected by the nfsd_mutex since it's only
 * accessed when nfsd is starting.
 */
static void
nfs4_set_recdir(char *recdir)
{
	strcpy(user_recovery_dirname, recdir);
}

/*
 * Change the NFSv4 recovery directory to recdir.  The caller must pass
 * a path that fits in PATH_MAX bytes, including the terminating NUL,
 * since nfs4_set_recdir() copies it into user_recovery_dirname.
 */
int
nfs4_reset_recoverydir(char *recdir)
{
	int status;
	struct path path;

	status = kern_path(recdir, LOOKUP_FOLLOW, &path);
	if (status)
		return status;
	status = -ENOTDIR;
	if (S_ISDIR(path.dentry->d_inode->i_mode)) {
		nfs4_set_recdir(recdir);
		status = 0;
	}
	path_put(&path);
	return status;
}

char *
nfs4_recoverydir(void)
{
	return user_recovery_dirname;
}