/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}

void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL && (delegation->type & flags) == flags) {
		nfs_mark_delegation_referenced(delegation);
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}

static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		goto out;

	/* Protect inode->i_flock using the BKL */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		unlock_kernel();
		status = nfs4_lock_delegation_recall(state, fl);
		if (status < 0)
			goto out;
		lock_kernel();
	}
	unlock_kernel();
out:
	return status;
}

static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return err;
		goto again;
	}
	spin_unlock(&inode->i_lock);
	return 0;
}

/*
 * Reclaim an existing delegation on an inode
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

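/*
 * Hand the delegation back to the server with a DELEGRETURN and
 * free the local delegation state.
 */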
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
	nfs_free_delegation(delegation);
	return res;
}

static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	spin_unlock(&delegation->lock);
	return inode;
}

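/*
 * Unlink the delegation from the inode and the per-client list.
 * The caller must hold clp->cl_lock. If @stateid is non-NULL, the
 * delegation is only detached when the stateids match. Returns the
 * detached delegation, or NULL if there was nothing to detach.
 */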
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	spin_lock(&delegation->lock);
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch_unlock;
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
nomatch_unlock:
	spin_unlock(&delegation->lock);
nomatch:
	return NULL;
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) != NULL) {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) == 0 &&
				delegation->type == nfsi->delegation->type) {
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type <= nfsi->delegation->type) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		freeme = nfs_detach_delegation_locked(nfsi, NULL);
	}
	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
	nfsi->delegation_state = delegation->type;
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int err;

	/*
	 * Guard against new delegated open/lock/unlock calls and against
	 * state recovery
	 */
	down_write(&nfsi->rwsem);
	err = nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	if (err)
		goto out;

	err = nfs_do_return_delegation(inode, delegation, issync);
out:
	return err;
}

/*
 * Return all delegations that have been marked for return
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;
	int err = 0;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL) {
			filemap_flush(inode->i_mapping);
			err = __nfs_inode_return_delegation(inode, delegation, 0);
		}
		iput(inode);
		if (!err)
			goto restart;
		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
		return err;
	}
	rcu_read_unlock();
	return 0;
}

/*
 * This function returns the delegation without reclaiming opens
 * or protecting against delegation reclaims.
 * It is therefore really only safe to be called from
 * nfs4_clear_inode()
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			nfs_do_return_delegation(inode, delegation, 0);
	}
}

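/*
 * Return the delegation held on an inode, if any: flush dirty data,
 * reclaim any delegated open and lock state, then send a synchronous
 * DELEGRETURN to the server.
 */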
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL) {
			nfs_msync_inode(inode);
			err = __nfs_inode_return_delegation(inode, delegation, 1);
		}
	}
	return err;
}

static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

/*
 * Return all delegations associated with a super block
 */
void nfs_super_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;

	if (clp == NULL)
		return;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
			set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
		spin_unlock(&delegation->lock);
	}
	rcu_read_unlock();
	if (nfs_client_return_marked_delegations(clp) != 0)
		nfs4_schedule_state_manager(clp);
}

static
void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
			continue;
		if (delegation->type & flags)
			nfs_mark_return_delegation(clp, delegation);
	}
	rcu_read_unlock();
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
		nfs4_schedule_state_manager(clp);
}

void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	nfs_client_mark_return_all_delegation_types(clp, flags);
	nfs_delegation_run_state_manager(clp);
}

void nfs_expire_all_delegations(struct nfs_client *clp)
{
	nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	if (clp == NULL)
		return;
	nfs_client_mark_return_all_delegations(clp);
}

static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
			continue;
		nfs_mark_return_delegation(clp, delegation);
	}
	rcu_read_unlock();
}

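/*
 * Return any delegations that have not been referenced since the
 * previous sweep, i.e. those whose NFS_DELEGATION_REFERENCED flag
 * is still clear.
 */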
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_unreferenced_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
				      int (*validate_stateid)(struct nfs_delegation *delegation,
							      const nfs4_stateid *stateid))
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);

	if (!validate_stateid(delegation, stateid)) {
		rcu_read_unlock();
		return -ENOENT;
	}

	nfs_mark_return_delegation(clp, delegation);
	rcu_read_unlock();
	nfs_delegation_run_state_manager(clp);
	return 0;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
		}
		spin_unlock(&delegation->lock);
		if (res != NULL)
			break;
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

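/*
 * Copy the current delegation stateid for @inode into @dst.
 * Returns 1 if a delegation was found, 0 otherwise.
 */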
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}