/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);
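/*
 * Establish a new lease with the server: SETCLIENTID introduces our
 * clientid and callback information, and SETCLIENTID_CONFIRM confirms
 * it.  On success, start the lease renewal timer.
 */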
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
			nfs_callback_tcpport, cred);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

u32
nfs4_alloc_lockowner_id(struct nfs_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rpc_cred *cred = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;

	if (!list_empty(&clp->cl_state_owners)) {
		sp = list_entry(clp->cl_state_owners.next,
				struct nfs4_state_owner, so_list);
		return get_rpccred(sp->so_cred);
	}
	return NULL;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}
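/*
 * Unlink a state_owner from the nfs_client lists so that it can no
 * longer be found and reused.  Called when the server reports the
 * owner's sequence id as bad (NFS4ERR_BAD_SEQID).
 */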
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}
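/*
 * Update the open mode of an nfs4_state.  Since this manipulates both
 * the owner's so_states list and the inode's inode_states list, the
 * caller is presumably expected to hold owner->so_lock and
 * inode->i_lock (as nfs4_close_state() does).
 */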
void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
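/*
 * Look up the nfs4_state for this (inode, open owner) pair, creating
 * one if it does not yet exist.  A new state is allocated
 * optimistically outside the locks, then the lookup is repeated under
 * so_lock and i_lock to close the race with a concurrent insertion.
 */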
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
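 *
 * Drops one share-mode reference (read, write, or both).  If that was
 * the last reference for a given mode, the remaining mode is computed
 * and, unless we hold a delegation, nfs4_do_close() is invoked to send
 * CLOSE or OPEN_DOWNGRADE to the server.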
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		state->n_rdonly--;
		break;
	case FMODE_WRITE:
		state->n_wronly--;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr--;
	}
	oldstate = newstate = state->state;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0)
			newstate &= ~FMODE_READ;
		if (state->n_wronly == 0)
			newstate &= ~FMODE_WRITE;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);

	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate and initialize a new lock_state structure, assigning it a
 * unique lockowner id under the client's cl_lock.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
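/*
 * Attach NFSv4 lock state to a struct file_lock.  The fl_ops above
 * keep the nfs4_lock_state refcount in step when the VFS copies or
 * releases the lock.
 */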
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	spin_unlock(&sequence->lock);
	rpc_wake_up(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
	case 0:
		break;
	case -NFS4ERR_BAD_SEQID:
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_BADXDR:
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_NOFILEHANDLE:
		/* Non-seqid mutating errors */
		return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
out:
	return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
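 *
 * Spawns the reclaimer kthread.  A module reference and a client
 * reference are taken on its behalf; both are dropped here if the
 * thread fails to start.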
 */
static void nfs4_recover_state(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
					__FUNCTION__, status);
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			break;
		case -NFS4ERR_STALE_CLIENTID:
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
					__FUNCTION__, status);
		case -ENOENT:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/*
			 * Open state on this file cannot be recovered.
			 * All we can do is revert to using the zero stateid.
			 */
			memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
			/* Mark the file as being 'closed' */
			state->state = 0;
			break;
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_STALE_CLIENTID:
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
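/*
 * The state recovery thread.  First try to renew the existing lease;
 * if the server no longer knows our clientid (or the lease has moved),
 * establish a new one and walk every state_owner, reclaiming open and
 * lock state using either the reboot or the network-partition
 * recovery ops.
 */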
static int reclaimer(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	struct rpc_cred *cred;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	ops = &nfs4_network_partition_recovery_ops;
	/* Are there any open files on this volume? */
	cred = nfs4_get_renew_cred(clp);
	if (cred != NULL) {
		/* Yes there are: try to renew the old lease */
		status = nfs4_proc_renew(clp, cred);
		switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			put_rpccred(cred);
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
		}
	} else {
		/* "reboot" to ensure we clear all state on the server */
		clp->cl_boot_time = CURRENT_TIME;
		cred = nfs4_get_setclientid_cred(clp);
	}
	/* We're going to have to re-establish a clientid */
	nfs4_state_mark_reclaim(clp);
	status = -ENOENT;
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
	}
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	unlock_kernel();
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.sin_addr), -status);
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */