#include "ceph_debug.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "super.h"
#include "decode.h"
#include "messenger.h"

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of a *_EXCL (exclusive) or FILE_WR capability, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */


/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
        if (c & CEPH_CAP_GSHARED)
                *s++ = 's';
        if (c & CEPH_CAP_GEXCL)
                *s++ = 'x';
        if (c & CEPH_CAP_GCACHE)
                *s++ = 'c';
        if (c & CEPH_CAP_GRD)
                *s++ = 'r';
        if (c & CEPH_CAP_GWR)
                *s++ = 'w';
        if (c & CEPH_CAP_GBUFFER)
                *s++ = 'b';
        if (c & CEPH_CAP_GLAZYIO)
                *s++ = 'l';
        return s;
}

const char *ceph_cap_string(int caps)
{
        int i;
        char *s;
        int c;

        spin_lock(&cap_str_lock);
        i = last_cap_str++;
        if (last_cap_str == MAX_CAP_STR)
                last_cap_str = 0;
        spin_unlock(&cap_str_lock);

        s = cap_str[i];

        if (caps & CEPH_CAP_PIN)
                *s++ = 'p';

        c = (caps >> CEPH_CAP_SAUTH) & 3;
        if (c) {
                *s++ = 'A';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SLINK) & 3;
        if (c) {
                *s++ = 'L';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SXATTR) & 3;
        if (c) {
                *s++ = 'X';
                s = gcap_string(s, c);
        }

        c = caps >> CEPH_CAP_SFILE;
        if (c) {
                *s++ = 'F';
                s = gcap_string(s, c);
        }

        if (s == cap_str[i])
                *s++ = '-';
        *s = 0;
        return cap_str[i];
}

/*
 * Cap reservations
 *
 * Maintain a global pool of preallocated struct ceph_caps, referenced
 * by struct ceph_cap_reservation.  This ensures that we preallocate
 * memory needed to successfully process an MDS response.  (If an MDS
 * sends us cap information and we fail to process it, we will have
 * problems due to the client and MDS being out of sync.)
 *
 * Reservations are 'owned' by a ceph_cap_reservation context.
 */
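
/*
 * Illustrative sketch only (hypothetical caller; not part of this
 * file's logic): preallocating from the pool before processing an MDS
 * reply guarantees the later get_cap() cannot fail:
 *
 *	struct ceph_cap_reservation ctx;
 *
 *	if (ceph_reserve_caps(&ctx, 2) < 0)
 *		return -ENOMEM;		// preallocate up front
 *	cap = get_cap(&ctx);		// draws from the reserved pool
 *	...
 *	ceph_unreserve_caps(&ctx);	// return whatever went unused
 */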

static spinlock_t caps_list_lock;
static struct list_head caps_list;  /* unused (reserved or unreserved) */
static int caps_total_count;        /* total caps allocated */
static int caps_use_count;          /* in use */
static int caps_reserve_count;      /* unused, reserved */
static int caps_avail_count;        /* unused, unreserved */
static int caps_min_count;          /* keep at least this many (unreserved) */

void __init ceph_caps_init(void)
{
        INIT_LIST_HEAD(&caps_list);
        spin_lock_init(&caps_list_lock);
}

void ceph_caps_finalize(void)
{
        struct ceph_cap *cap;

        spin_lock(&caps_list_lock);
        while (!list_empty(&caps_list)) {
                cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
                list_del(&cap->caps_item);
                kmem_cache_free(ceph_cap_cachep, cap);
        }
        caps_total_count = 0;
        caps_avail_count = 0;
        caps_use_count = 0;
        caps_reserve_count = 0;
        caps_min_count = 0;
        spin_unlock(&caps_list_lock);
}

void ceph_adjust_min_caps(int delta)
{
        spin_lock(&caps_list_lock);
        caps_min_count += delta;
        BUG_ON(caps_min_count < 0);
        spin_unlock(&caps_list_lock);
}

int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
{
        int i;
        struct ceph_cap *cap;
        int have;
        int alloc = 0;
        LIST_HEAD(newcaps);
        int ret = 0;

        dout("reserve caps ctx=%p need=%d\n", ctx, need);

        /* first reserve any caps that are already allocated */
        spin_lock(&caps_list_lock);
        if (caps_avail_count >= need)
                have = need;
        else
                have = caps_avail_count;
        caps_avail_count -= have;
        caps_reserve_count += have;
        BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
               caps_avail_count);
        spin_unlock(&caps_list_lock);

        for (i = have; i < need; i++) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (!cap) {
                        ret = -ENOMEM;
                        goto out_alloc_count;
                }
                list_add(&cap->caps_item, &newcaps);
                alloc++;
        }
        BUG_ON(have + alloc != need);

        spin_lock(&caps_list_lock);
        caps_total_count += alloc;
        caps_reserve_count += alloc;
        list_splice(&newcaps, &caps_list);

        BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
               caps_avail_count);
        spin_unlock(&caps_list_lock);

        ctx->count = need;
        dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
             ctx, caps_total_count, caps_use_count, caps_reserve_count,
             caps_avail_count);
        return 0;

out_alloc_count:
        /* we didn't manage to reserve as much as we needed */
        pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
                   ctx, need, have);
        return ret;
}

int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
{
        dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
        if (ctx->count) {
                spin_lock(&caps_list_lock);
                BUG_ON(caps_reserve_count < ctx->count);
                caps_reserve_count -= ctx->count;
                caps_avail_count += ctx->count;
                ctx->count = 0;
                dout("unreserve caps %d = %d used + %d resv + %d avail\n",
                     caps_total_count, caps_use_count, caps_reserve_count,
                     caps_avail_count);
                BUG_ON(caps_total_count != caps_use_count +
                       caps_reserve_count + caps_avail_count);
                spin_unlock(&caps_list_lock);
        }
        return 0;
}
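
/*
 * Accounting example (numbers hypothetical): if 3 caps sit unreserved
 * in the pool, ceph_reserve_caps(ctx, 10) claims those 3
 * (caps_avail_count 3 -> 0, caps_reserve_count += 3) and allocates 7
 * more (caps_total_count += 7, caps_reserve_count += 7).  Throughout,
 * the invariant asserted by the BUG_ONs above holds:
 *
 *	caps_total_count == caps_use_count + caps_reserve_count +
 *			    caps_avail_count
 */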

static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
{
        struct ceph_cap *cap = NULL;

        /* temporary, until we do something about cap import/export */
        if (!ctx)
                return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);

        spin_lock(&caps_list_lock);
        dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
             ctx, ctx->count, caps_total_count, caps_use_count,
             caps_reserve_count, caps_avail_count);
        BUG_ON(!ctx->count);
        BUG_ON(ctx->count > caps_reserve_count);
        BUG_ON(list_empty(&caps_list));

        ctx->count--;
        caps_reserve_count--;
        caps_use_count++;

        cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
        list_del(&cap->caps_item);

        BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
               caps_avail_count);
        spin_unlock(&caps_list_lock);
        return cap;
}

void ceph_put_cap(struct ceph_cap *cap)
{
        spin_lock(&caps_list_lock);
        dout("put_cap %p %d = %d used + %d resv + %d avail\n",
             cap, caps_total_count, caps_use_count,
             caps_reserve_count, caps_avail_count);
        caps_use_count--;
        /*
         * Keep some preallocated caps around (ceph_min_count), to
         * avoid lots of free/alloc churn.
         */
        if (caps_avail_count >= caps_reserve_count + caps_min_count) {
                caps_total_count--;
                kmem_cache_free(ceph_cap_cachep, cap);
        } else {
                caps_avail_count++;
                list_add(&cap->caps_item, &caps_list);
        }

        BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
               caps_avail_count);
        spin_unlock(&caps_list_lock);
}

void ceph_reservation_status(struct ceph_client *client,
                             int *total, int *avail, int *used, int *reserved,
                             int *min)
{
        if (total)
                *total = caps_total_count;
        if (avail)
                *avail = caps_avail_count;
        if (used)
                *used = caps_use_count;
        if (reserved)
                *reserved = caps_reserve_count;
        if (min)
                *min = caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;
        struct rb_node *n = ci->i_caps.rb_node;

        while (n) {
                cap = rb_entry(n, struct ceph_cap, ci_node);
                if (mds < cap->mds)
                        n = n->rb_left;
                else if (mds > cap->mds)
                        n = n->rb_right;
                else
                        return cap;
        }
        return NULL;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else
 * -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
{
        struct ceph_cap *cap;
        int mds = -1;
        struct rb_node *p;

        /* prefer mds with WR|WRBUFFER|EXCL caps */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                mds = cap->mds;
                if (mseq)
                        *mseq = cap->mseq;
                if (cap->issued & (CEPH_CAP_FILE_WR |
                                   CEPH_CAP_FILE_BUFFER |
                                   CEPH_CAP_FILE_EXCL))
                        break;
        }
        return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
        int mds;
        spin_lock(&inode->i_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
        spin_unlock(&inode->i_lock);
        return mds;
}

/*
 * Called under i_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
{
        struct rb_node **p = &ci->i_caps.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap *cap = NULL;

        while (*p) {
                parent = *p;
                cap = rb_entry(parent, struct ceph_cap, ci_node);
                if (new->mds < cap->mds)
                        p = &(*p)->rb_left;
                else if (new->mds > cap->mds)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->ci_node, parent, p);
        rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        struct ceph_mount_args *ma = mdsc->client->mount_args;

        ci->i_hold_caps_min = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_min * HZ);
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_max * HZ);
        dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
             ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
{
        __cap_set_timeouts(mdsc, ci);
        dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
             ci->i_ceph_flags, ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
                if (!list_empty(&ci->i_cap_delay_list)) {
                        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                                goto no_change;
                        list_del_init(&ci->i_cap_delay_list);
                }
                list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
                spin_unlock(&mdsc->cap_delay_lock);
        }
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
{
        dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
                list_del_init(&ci->i_cap_delay_list);
        list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
        list_del_init(&ci->i_cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}
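
/*
 * Timing sketch (mount option values are illustrative): with
 * caps_wanted_delay_min=5 and caps_wanted_delay_max=60, a cap used now
 * will not be released for at least ~5 seconds (i_hold_caps_min), and
 * the delayed-release worker will revisit the inode within ~60 seconds
 * (i_hold_caps_max), modulo round_jiffies() rounding.
 */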

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                              unsigned issued)
{
        unsigned had = __ceph_caps_issued(ci, NULL);

        /*
         * Each time we receive FILE_CACHE anew, we increment
         * i_rdcache_gen.
         */
        if ((issued & CEPH_CAP_FILE_CACHE) &&
            (had & CEPH_CAP_FILE_CACHE) == 0)
                ci->i_rdcache_gen++;

        /*
         * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
         * don't know what happened to this directory while we didn't
         * have the cap.
         */
        if ((issued & CEPH_CAP_FILE_SHARED) &&
            (had & CEPH_CAP_FILE_SHARED) == 0) {
                ci->i_shared_gen++;
                if (S_ISDIR(ci->vfs_inode.i_mode)) {
                        dout(" marking %p NOT complete\n", &ci->vfs_inode);
                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
                }
        }
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
                 struct ceph_mds_session *session, u64 cap_id,
                 int fmode, unsigned issued, unsigned wanted,
                 unsigned seq, unsigned mseq, u64 realmino, int flags,
                 struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *new_cap = NULL;
        struct ceph_cap *cap;
        int mds = session->s_mds;
        int actual_wanted;

        dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
             session->s_mds, cap_id, ceph_cap_string(issued), seq);

        /*
         * If we are opening the file, include file mode wanted bits
         * in wanted.
         */
        if (fmode >= 0)
                wanted |= ceph_caps_for_mode(fmode);

retry:
        spin_lock(&inode->i_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
                        spin_unlock(&inode->i_lock);
                        new_cap = get_cap(caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
                        goto retry;
                }

                cap->issued = 0;
                cap->implemented = 0;
                cap->mds = mds;
                cap->mds_wanted = 0;

                cap->ci = ci;
                __insert_cap_node(ci, cap);

                /* clear out old exporting info?  (i.e. on cap import) */
                if (ci->i_cap_exporting_mds == mds) {
                        ci->i_cap_exporting_issued = 0;
                        ci->i_cap_exporting_mseq = 0;
                        ci->i_cap_exporting_mds = -1;
                }

                /* add to session cap list */
                cap->session = session;
                spin_lock(&session->s_cap_lock);
                list_add_tail(&cap->session_caps, &session->s_caps);
                session->s_nr_caps++;
                spin_unlock(&session->s_cap_lock);
        }

        if (!ci->i_snap_realm) {
                /*
                 * add this inode to the appropriate snap realm
                 */
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                                realmino);
                if (realm) {
                        ceph_get_snap_realm(mdsc, realm);
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
                                 &realm->inodes_with_caps);
                        spin_unlock(&realm->inodes_with_caps_lock);
                } else {
                        pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
                               realmino);
                }
        }

        __check_cap_issue(ci, cap, issued);
        /*
         * If we are issued caps we don't want, or the mds' wanted
         * value appears to be off, queue a check so we'll release
         * later and/or update the mds wanted value.
         */
        actual_wanted = __ceph_caps_wanted(ci);
        if ((wanted & ~actual_wanted) ||
            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
                dout(" issued %s, mds wanted %s, actual %s, queueing\n",
                     ceph_cap_string(issued), ceph_cap_string(wanted),
                     ceph_cap_string(actual_wanted));
                __cap_delay_requeue(mdsc, ci);
        }

        if (flags & CEPH_CAP_FLAG_AUTH)
                ci->i_auth_cap = cap;
        else if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
             ceph_cap_string(issued|cap->issued), seq, mds);
        cap->cap_id = cap_id;
        cap->issued = issued;
        cap->implemented |= issued;
        cap->mds_wanted |= wanted;
        cap->seq = seq;
        cap->issue_seq = seq;
        cap->mseq = mseq;
        cap->cap_gen = session->s_cap_gen;

        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
        spin_unlock(&inode->i_lock);
        wake_up(&ci->i_cap_wq);
        return 0;
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
        unsigned long ttl;
        u32 gen;

        spin_lock(&cap->session->s_cap_lock);
        gen = cap->session->s_cap_gen;
        ttl = cap->session->s_cap_ttl;
        spin_unlock(&cap->session->s_cap_lock);

        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
                     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }

        return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        if (implemented)
                *implemented = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                dout("__ceph_caps_issued %p cap %p issued %s\n",
                     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
        }
        return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap == ocap)
                        continue;
                if (!__cap_is_valid(cap))
                        continue;
                have |= cap->issued;
        }
        return have;
}
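
/*
 * Worked example of issued vs. implemented (cap states hypothetical):
 * if the MDS revokes Fc (FILE_CACHE) while we still hold cached pages,
 * cap->issued drops Fc immediately but cap->implemented keeps it until
 * we release our references and ack.  During that window
 * (implemented & ~issued & CEPH_CAP_FILE_CACHE) != 0, so
 * ceph_caps_revoking() below reports the revocation in progress.
 */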

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *s = cap->session;

        spin_lock(&s->s_cap_lock);
        if (s->s_cap_iterator == NULL) {
                dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
                     s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
                     &cap->ci->vfs_inode, cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int have = ci->i_snap_caps;

        if ((have & mask) == mask) {
                dout("__ceph_caps_issued_mask %p snap issued %s"
                     " (mask %s)\n", &ci->vfs_inode,
                     ceph_cap_string(have),
                     ceph_cap_string(mask));
                return 1;
        }

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if ((cap->issued & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p cap %p issued %s"
                             " (mask %s)\n", &ci->vfs_inode, cap,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch)
                                __touch_cap(cap);
                        return 1;
                }

                /* does a combination of caps satisfy mask? */
                have |= cap->issued;
                if ((have & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p combo issued %s"
                             " (mask %s)\n", &ci->vfs_inode,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch) {
                                struct rb_node *q;

                                /* touch this + preceding caps */
                                __touch_cap(cap);
                                for (q = rb_first(&ci->i_caps); q != p;
                                     q = rb_next(q)) {
                                        cap = rb_entry(q, struct ceph_cap,
                                                       ci_node);
                                        if (!__cap_is_valid(cap))
                                                continue;
                                        __touch_cap(cap);
                                }
                        }
                        return 1;
                }
        }

        return 0;
}

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
        struct rb_node *p;
        int ret = 0;

        spin_lock(&inode->i_lock);
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (__cap_is_valid(cap) &&
                    (cap->implemented & ~cap->issued & mask)) {
                        ret = 1;
                        break;
                }
        }
        spin_unlock(&inode->i_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
        int used = 0;
        if (ci->i_pin_ref)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
        if (ci->i_rdcache_ref || ci->i_rdcache_gen)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
        if (ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
        int want = 0;
        int mode;
        for (mode = 0; mode < 4; mode++)
                if (ci->i_nr_by_mode[mode])
                        want |= ceph_caps_for_mode(mode);
        return want;
}
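
/*
 * Example (sketch): an open for read bumps i_nr_by_mode[CEPH_FILE_MODE_RD],
 * so __ceph_caps_file_wanted() above folds in ceph_caps_for_mode() for
 * that mode, which includes at least CEPH_CAP_FILE_RD; a writer's slot
 * contributes the FILE_WR side.  (The exact per-mode cap sets are
 * defined by ceph_caps_for_mode(), elsewhere.)
 */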

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int mds_wanted = 0;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                mds_wanted |= cap->mds_wanted;
        }
        return mds_wanted;
}

/*
 * called under i_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
        return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * caller should hold i_lock, and session s_mutex.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;

        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

        /* remove from inode list */
        rb_erase(&cap->ci_node, &ci->i_caps);
        cap->ci = NULL;
        if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        /* remove from session list */
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap delaying %p removal from session %p\n",
                     cap, cap->session);
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
                cap->session = NULL;
        }
        spin_unlock(&session->s_cap_lock);

        if (cap->session == NULL)
                ceph_put_cap(cap);

        if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
                struct ceph_snap_realm *realm = ci->i_snap_realm;
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                ci->i_snap_realm_counter++;
                ci->i_snap_realm = NULL;
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }
        if (!__ceph_is_any_real_caps(ci))
                __cap_delay_cancel(mdsc, ci);
}

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
                        u64 ino, u64 cid, int op,
                        int caps, int wanted, int dirty,
                        u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
                        u64 size, u64 max_size,
                        struct timespec *mtime, struct timespec *atime,
                        u64 time_warp_seq,
                        uid_t uid, gid_t gid, mode_t mode,
                        u64 xattr_version,
                        struct ceph_buffer *xattrs_buf,
                        u64 follows)
{
        struct ceph_mds_caps *fc;
        struct ceph_msg *msg;

        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
             " seq %u/%u mseq %u follows %lld size %llu/%llu"
             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
             ceph_cap_string(dirty),
             seq, issue_seq, mseq, follows, size, max_size,
             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->hdr.tid = cpu_to_le64(flush_tid);

        fc = msg->front.iov_base;
        memset(fc, 0, sizeof(*fc));

        fc->cap_id = cpu_to_le64(cid);
        fc->op = cpu_to_le32(op);
        fc->seq = cpu_to_le32(seq);
        fc->issue_seq = cpu_to_le32(issue_seq);
        fc->migrate_seq = cpu_to_le32(mseq);
        fc->caps = cpu_to_le32(caps);
        fc->wanted = cpu_to_le32(wanted);
        fc->dirty = cpu_to_le32(dirty);
        fc->ino = cpu_to_le64(ino);
        fc->snap_follows = cpu_to_le64(follows);

        fc->size = cpu_to_le64(size);
        fc->max_size = cpu_to_le64(max_size);
        if (mtime)
                ceph_encode_timespec(&fc->mtime, mtime);
        if (atime)
                ceph_encode_timespec(&fc->atime, atime);
        fc->time_warp_seq = cpu_to_le32(time_warp_seq);

        fc->uid = cpu_to_le32(uid);
        fc->gid = cpu_to_le32(gid);
        fc->mode = cpu_to_le32(mode);

        fc->xattr_version = cpu_to_le64(xattr_version);
        if (xattrs_buf) {
                msg->middle = ceph_buffer_get(xattrs_buf);
                fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
                msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
        }

        ceph_con_send(&session->s_con, msg);
        return 0;
}

/*
 * Queue cap releases when an inode is dropped from our cache.
 */
void ceph_queue_caps_release(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;

        spin_lock(&inode->i_lock);
        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                struct ceph_mds_session *session = cap->session;
                struct ceph_msg *msg;
                struct ceph_mds_cap_release *head;
                struct ceph_mds_cap_item *item;

                spin_lock(&session->s_cap_lock);
                BUG_ON(!session->s_num_cap_releases);
                msg = list_first_entry(&session->s_cap_releases,
                                       struct ceph_msg, list_head);

                dout(" adding %p release to mds%d msg %p (%d left)\n",
                     inode, session->s_mds, msg,
                     session->s_num_cap_releases);

                BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
                head = msg->front.iov_base;
                head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
                item = msg->front.iov_base + msg->front.iov_len;
                item->ino = cpu_to_le64(ceph_ino(inode));
                item->cap_id = cpu_to_le64(cap->cap_id);
                item->migrate_seq = cpu_to_le32(cap->mseq);
                item->seq = cpu_to_le32(cap->issue_seq);

                session->s_num_cap_releases--;

                msg->front.iov_len += sizeof(*item);
                if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
                        dout(" release msg %p full\n", msg);
                        list_move_tail(&msg->list_head,
                                       &session->s_cap_releases_done);
                } else {
                        dout(" release msg %p at %d/%d (%d)\n", msg,
                             (int)le32_to_cpu(head->num),
                             (int)CEPH_CAPS_PER_RELEASE,
                             (int)msg->front.iov_len);
                }
                spin_unlock(&session->s_cap_lock);
                p = rb_next(p);
                __ceph_remove_cap(cap);
        }
        spin_unlock(&inode->i_lock);
}
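
/*
 * Batching example (illustrative): each message on s_cap_releases has
 * room for CEPH_CAPS_PER_RELEASE items.  Dropping caps appends items
 * and bumps head->num; the release that brings head->num up to
 * CEPH_CAPS_PER_RELEASE moves the now-full message to
 * s_cap_releases_done, from where it is later sent to the MDS.
 */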

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
        __releases(cap->ci->vfs_inode->i_lock)
{
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
        u64 cap_id = cap->cap_id;
        int held, revoking, dropping, keep;
        u64 seq, issue_seq, mseq, time_warp_seq, follows;
        u64 size, max_size;
        struct timespec mtime, atime;
        int wake = 0;
        mode_t mode;
        uid_t uid;
        gid_t gid;
        struct ceph_mds_session *session;
        u64 xattr_version = 0;
        int delayed = 0;
        u64 flush_tid = 0;
        int i;
        int ret;

        held = cap->issued | cap->implemented;
        revoking = cap->implemented & ~cap->issued;
        retain &= ~revoking;
        dropping = cap->issued & ~retain;

        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
             inode, cap, cap->session,
             ceph_cap_string(held), ceph_cap_string(held & retain),
             ceph_cap_string(revoking));
        BUG_ON((retain & CEPH_CAP_PIN) == 0);

        session = cap->session;

        /* don't release wanted unless we've waited a bit. */
        if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
            time_before(jiffies, ci->i_hold_caps_min)) {
                dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
                     ceph_cap_string(cap->issued),
                     ceph_cap_string(cap->issued & retain),
                     ceph_cap_string(cap->mds_wanted),
                     ceph_cap_string(want));
                want |= cap->mds_wanted;
                retain |= cap->issued;
                delayed = 1;
        }
        ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

        cap->issued &= retain;  /* drop bits we don't want */
        if (cap->implemented & ~cap->issued) {
                /*
                 * Wake up any waiters on wanted -> needed transition.
                 * This is due to the weird transition from buffered
                 * to sync IO... we need to flush dirty pages _before_
                 * allowing sync writes to avoid reordering.
                 */
                wake = 1;
        }
        cap->implemented &= cap->issued | used;
        cap->mds_wanted = want;

        if (flushing) {
                /*
                 * assign a tid for flush operations so we can avoid
                 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
                 * clean type races.  track latest tid for every bit
                 * so we can handle flush AxFw, flush Fw, and have the
                 * first ack clean Ax.
                 */
                flush_tid = ++ci->i_cap_flush_last_tid;
                if (pflush_tid)
                        *pflush_tid = flush_tid;
                dout(" cap_flush_tid %d\n", (int)flush_tid);
                for (i = 0; i < CEPH_CAP_BITS; i++)
                        if (flushing & (1 << i))
                                ci->i_cap_flush_tid[i] = flush_tid;
        }

        keep = cap->implemented;
        seq = cap->seq;
        issue_seq = cap->issue_seq;
        mseq = cap->mseq;
        size = inode->i_size;
        ci->i_reported_size = size;
        max_size = ci->i_wanted_max_size;
        ci->i_requested_max_size = max_size;
        mtime = inode->i_mtime;
        atime = inode->i_atime;
        time_warp_seq = ci->i_time_warp_seq;
        follows = ci->i_snap_realm->cached_context->seq;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mode = inode->i_mode;

        if (dropping & CEPH_CAP_XATTR_EXCL) {
                __ceph_build_xattrs_blob(ci);
                xattr_version = ci->i_xattrs.version + 1;
        }

        spin_unlock(&inode->i_lock);

        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
                size, max_size, &mtime, &atime, time_warp_seq,
                uid, gid, mode,
                xattr_version,
                (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
                follows);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
                delayed = 1;
        }

        if (wake)
                wake_up(&ci->i_cap_wq);

        return delayed;
}
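
/*
 * flush_tid example, spelling out the race described in __send_cap
 * above (tids hypothetical): flush #1 covers Ax|Fw with tid 5, so
 * i_cap_flush_tid[Ax] = i_cap_flush_tid[Fw] = 5.  Fw is dirtied again
 * and flush #2 covers Fw with tid 6, overwriting i_cap_flush_tid[Fw].
 * When the ack for tid 5 arrives, only bits still recorded at tid <= 5
 * (here Ax) may be marked clean; Fw stays dirty until the tid 6 ack.
 */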

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession)
{
        struct inode *inode = &ci->vfs_inode;
        int mds;
        struct ceph_cap_snap *capsnap;
        u32 mseq;
        struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_session *session = NULL; /* if session != NULL, we
                                                    hold session->s_mutex */
        u64 next_follows = 0;  /* keep track of how far we've gotten through
                                  the i_cap_snaps list, and skip these entries
                                  next time around to avoid an infinite loop */

        if (psession)
                session = *psession;

        dout("__flush_snaps %p\n", inode);
retry:
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                /* avoid an infinite loop after retry */
                if (capsnap->follows < next_follows)
                        continue;
                /*
                 * we need to wait for sync writes to complete and for dirty
                 * pages to be written out.
                 */
                if (capsnap->dirty_pages || capsnap->writing)
                        continue;

                /* pick mds, take s_mutex */
                mds = __ceph_get_cap_mds(ci, &mseq);
                if (session && session->s_mds != mds) {
                        dout("oops, wrong session %p mutex\n", session);
                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        session = NULL;
                }
                if (!session) {
                        spin_unlock(&inode->i_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
                        if (session) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
                                mutex_lock(&session->s_mutex);
                        }
                        /*
                         * if session == NULL, we raced against a cap
                         * deletion.  retry, and we'll get a better
                         * @mds value next time.
                         */
                        spin_lock(&inode->i_lock);
                        goto retry;
                }

                capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
                atomic_inc(&capsnap->nref);
                if (!list_empty(&capsnap->flushing_item))
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
                spin_unlock(&inode->i_lock);

                dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
                     inode, capsnap, next_follows, capsnap->size);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
                             capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
                             capsnap->size, 0,
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
                             0, NULL,
                             capsnap->follows);

                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);

                spin_lock(&inode->i_lock);
                goto retry;
        }

        /* we flushed them all; remove this inode from the queue */
        spin_lock(&mdsc->snap_flush_lock);
        list_del_init(&ci->i_snap_flush_item);
        spin_unlock(&mdsc->snap_flush_lock);

        if (psession)
                *psession = session;
        else if (session) {
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
        }
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
        struct inode *inode = &ci->vfs_inode;

        spin_lock(&inode->i_lock);
        __ceph_flush_snaps(ci, NULL);
        spin_unlock(&inode->i_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, add to the global dirty
 * list.
 */
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
        struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
        struct inode *inode = &ci->vfs_inode;
        int was = ci->i_dirty_caps;
        int dirty = 0;

        dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
             ceph_cap_string(mask), ceph_cap_string(was),
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
                dout(" inode %p now dirty\n", &ci->vfs_inode);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
                spin_unlock(&mdsc->cap_dirty_lock);
                if (ci->i_flushing_caps == 0) {
                        igrab(inode);
                        dirty |= I_DIRTY_SYNC;
                }
        }
        BUG_ON(list_empty(&ci->i_dirty_item));
        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
            (mask & CEPH_CAP_FILE_BUFFER))
                dirty |= I_DIRTY_DATASYNC;
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        __cap_delay_requeue(mdsc, ci);
}

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
                                struct ceph_mds_session *session)
{
        struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int flushing;

        BUG_ON(ci->i_dirty_caps == 0);
        BUG_ON(list_empty(&ci->i_dirty_item));

        flushing = ci->i_dirty_caps;
        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
             ceph_cap_string(flushing),
             ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(ci->i_flushing_caps | flushing));
        ci->i_flushing_caps |= flushing;
        ci->i_dirty_caps = 0;
        dout(" inode %p now !dirty\n", inode);

        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);

        ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
        if (list_empty(&ci->i_flushing_item)) {
                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                mdsc->num_cap_flushing++;
                dout(" inode %p now flushing seq %lld\n", inode,
                     ci->i_cap_flush_seq);
        } else {
                list_move_tail(&ci->i_flushing_item,
                               &session->s_cap_flushing);
                dout(" inode %p now flushing (more) seq %lld\n", inode,
                     ci->i_cap_flush_seq);
        }
        spin_unlock(&mdsc->cap_dirty_lock);

        return flushing;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int mapping_is_empty(struct address_space *mapping)
{
        struct page *page = find_get_page(mapping, 0);

        if (!page)
                return 1;

        put_page(page);
        return 0;
}

static int try_nonblocking_invalidate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;

        spin_unlock(&inode->i_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
        spin_lock(&inode->i_lock);

        if (mapping_is_empty(&inode->i_data) &&
            invalidating_gen == ci->i_rdcache_gen) {
                /* success. */
                dout("try_nonblocking_invalidate %p success\n", inode);
                ci->i_rdcache_gen = 0;
                ci->i_rdcache_revoking = 0;
                return 0;
        }
        dout("try_nonblocking_invalidate %p failed\n", inode);
        return -1;
}

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                     struct ceph_mds_session *session)
{
        struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
        int file_wanted, used;
        int took_snap_rwsem = 0;         /* true if mdsc->snap_rwsem held */
        int drop_session_lock = session ? 0 : 1;
        int issued, implemented, want, retain, revoking, flushing = 0;
        int mds = -1;   /* keep track of how far we've gone through i_caps
                           list to avoid an infinite loop on retry */
        struct rb_node *p;
        int tried_invalidate = 0;
        int delayed = 0, sent = 0, force_requeue = 0, num;
        int queue_invalidate = 0;
        int is_delayed = flags & CHECK_CAPS_NODELAY;

        /* if we are unmounting, flush any unused caps immediately. */
        if (mdsc->stopping)
                is_delayed = 1;

        spin_lock(&inode->i_lock);

        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;

        /* flush snaps first time around only */
        if (!list_empty(&ci->i_cap_snaps))
                __ceph_flush_snaps(ci, &session);
        goto retry_locked;
retry:
        spin_lock(&inode->i_lock);
retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
        want = file_wanted | used;
        issued = __ceph_caps_issued(ci, &implemented);
        revoking = implemented & ~issued;

        retain = want | CEPH_CAP_PIN;
        if (!mdsc->stopping && inode->i_nlink > 0) {
                if (want) {
                        retain |= CEPH_CAP_ANY;       /* be greedy */
                } else {
                        retain |= CEPH_CAP_ANY_SHARED;
                        /*
                         * keep RD only if we didn't have the file open RW,
                         * because then the mds would revoke it anyway to
                         * journal max_size=0.
                         */
                        if (ci->i_max_size == 0)
                                retain |= CEPH_CAP_ANY_RD;
                }
        }

        dout("check_caps %p file_want %s used %s dirty %s flushing %s"
             " issued %s revoking %s retain %s %s%s%s\n", inode,
             ceph_cap_string(file_wanted),
             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
             ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(issued), ceph_cap_string(revoking),
             ceph_cap_string(retain),
             (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
             (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
             (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

        /*
         * If we no longer need to hold onto our old caps, and we may
         * have cached pages, but don't want them, then try to invalidate.
         * If we fail, it's because pages are locked.... try again later.
         */
        if ((!is_delayed || mdsc->stopping) &&
            ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
            ci->i_rdcache_gen &&                     /* may have cached pages */
            (file_wanted == 0 ||                     /* no open files */
             (revoking & CEPH_CAP_FILE_CACHE)) &&    /* or revoking cache */
            !tried_invalidate) {
                dout("check_caps trying to invalidate on %p\n", inode);
                if (try_nonblocking_invalidate(inode) < 0) {
                        if (revoking & CEPH_CAP_FILE_CACHE) {
                                dout("check_caps queuing invalidate\n");
                                queue_invalidate = 1;
                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
                        } else {
                                dout("check_caps failed to invalidate pages\n");
                                /* we failed to invalidate pages.  check these
                                   caps again later. */
                                force_requeue = 1;
                                __cap_set_timeouts(mdsc, ci);
                        }
                }
                tried_invalidate = 1;
                goto retry_locked;
        }

        num = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                num++;

                /* avoid looping forever */
                if (mds >= cap->mds ||
                    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
                        continue;

                /* NOTE: no side-effects allowed, until we take s_mutex */

                revoking = cap->implemented & ~cap->issued;
                if (revoking)
                        dout(" mds%d revoking %s\n", cap->mds,
                             ceph_cap_string(revoking));

                if (cap == ci->i_auth_cap &&
                    (cap->issued & CEPH_CAP_FILE_WR)) {
                        /* request larger max_size from MDS? */
                        if (ci->i_wanted_max_size > ci->i_max_size &&
                            ci->i_wanted_max_size > ci->i_requested_max_size) {
                                dout("requesting new max_size\n");
                                goto ack;
                        }

                        /* approaching file_max? */
                        if ((inode->i_size << 1) >= ci->i_max_size &&
                            (ci->i_reported_size << 1) < ci->i_max_size) {
                                dout("i_size approaching max_size\n");
                                goto ack;
                        }
                }
                /* flush anything dirty? */
                if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
                    ci->i_dirty_caps) {
                        dout("flushing dirty caps\n");
                        goto ack;
                }

                /* completed revocation? going down and there are no caps? */
                if (revoking && (revoking & used) == 0) {
                        dout("completed revocation of %s\n",
                             ceph_cap_string(cap->implemented & ~cap->issued));
                        goto ack;
                }

                /* want more caps from mds? */
                if (want & ~(cap->mds_wanted | cap->issued))
                        goto ack;

                /* things we might delay */
                if ((cap->issued & ~retain) == 0 &&
                    cap->mds_wanted == want)
                        continue;     /* nope, all good */

                if (is_delayed)
                        goto ack;

                /* delay? */
                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
                    time_before(jiffies, ci->i_hold_caps_max)) {
                        dout(" delaying issued %s -> %s, wanted %s -> %s\n",
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(cap->issued & retain),
                             ceph_cap_string(cap->mds_wanted),
                             ceph_cap_string(want));
                        delayed++;
                        continue;
                }

ack:
                if (session && session != cap->session) {
                        dout("oops, wrong session %p mutex\n", session);
                        mutex_unlock(&session->s_mutex);
                        session = NULL;
                }
                if (!session) {
                        session = cap->session;
                        if (mutex_trylock(&session->s_mutex) == 0) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
                                spin_unlock(&inode->i_lock);
                                if (took_snap_rwsem) {
                                        up_read(&mdsc->snap_rwsem);
                                        took_snap_rwsem = 0;
                                }
                                mutex_lock(&session->s_mutex);
                                goto retry;
                        }
                }
                /* take snap_rwsem after session mutex */
                if (!took_snap_rwsem) {
                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
                                dout("inverting snap/in locks on %p\n",
                                     inode);
                                spin_unlock(&inode->i_lock);
                                down_read(&mdsc->snap_rwsem);
                                took_snap_rwsem = 1;
                                goto retry;
                        }
                        took_snap_rwsem = 1;
                }

                if (cap == ci->i_auth_cap && ci->i_dirty_caps)
                        flushing = __mark_caps_flushing(inode, session);

                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;

                /* __send_cap drops i_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used,
                                      want, retain, flushing, NULL);
                goto retry;  /* retake i_lock and restart our cap scan. */
        }

        /*
         * Reschedule delayed caps release if we delayed anything,
         * otherwise cancel.
         */
        if (delayed && is_delayed)
                force_requeue = 1;   /* __send_cap delayed release; requeue */
        if (!delayed && !is_delayed)
                __cap_delay_cancel(mdsc, ci);
        else if (!is_delayed || force_requeue)
                __cap_delay_requeue(mdsc, ci);

        spin_unlock(&inode->i_lock);

        if (queue_invalidate)
                ceph_queue_invalidate(inode);

        if (session && drop_session_lock)
                mutex_unlock(&session->s_mutex);
        if (took_snap_rwsem)
                up_read(&mdsc->snap_rwsem);
}

/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode,
                          struct ceph_mds_session *session,
                          unsigned *flush_tid)
{
        struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int unlock_session = session ? 0 : 1;
        int flushing = 0;

retry:
        spin_lock(&inode->i_lock);
        if (ci->i_dirty_caps && ci->i_auth_cap) {
                struct ceph_cap *cap = ci->i_auth_cap;
                int used = __ceph_caps_used(ci);
                int want = __ceph_caps_wanted(ci);
                int delayed;

                if (!session) {
                        spin_unlock(&inode->i_lock);
                        session = cap->session;
                        mutex_lock(&session->s_mutex);
                        goto retry;
                }
                BUG_ON(session != cap->session);
                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
                        goto out;

                flushing = __mark_caps_flushing(inode, session);

                /* __send_cap drops i_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
                                     cap->issued | cap->implemented, flushing,
                                     flush_tid);
                if (!delayed)
                        goto out_unlocked;

                spin_lock(&inode->i_lock);
                __cap_delay_requeue(mdsc, ci);
        }
out:
        spin_unlock(&inode->i_lock);
out_unlocked:
        if (session && unlock_session)
                mutex_unlock(&session->s_mutex);
        return flushing;
}

/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, unsigned tid)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int dirty, i, ret = 1;

        spin_lock(&inode->i_lock);
        dirty = __ceph_caps_dirty(ci);
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((ci->i_flushing_caps & (1 << i)) &&
                    ci->i_cap_flush_tid[i] <= tid) {
                        /* still flushing this bit */
                        ret = 0;
                        break;
                }
        spin_unlock(&inode->i_lock);
        return ret;
}

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
static void sync_write_wait(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_writes;
        struct ceph_osd_request *req;
        u64 last_tid;

        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        /* set upper bound as _last_ entry in chain */
        req = list_entry(head->prev, struct ceph_osd_request,
                         r_unsafe_item);
        last_tid = req->r_tid;

        do {
                ceph_osdc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);
                dout("sync_write_wait on tid %llu (until %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                spin_lock(&ci->i_unsafe_lock);
                ceph_osdc_put_request(req);

                /*
                 * from here on look at first entry in chain, since we
                 * only want to wait for anything older than last_tid
                 */
                if (list_empty(head))
                        break;
                req = list_entry(head->next, struct ceph_osd_request,
                                 r_unsafe_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
}

int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        unsigned flush_tid;
        int ret;
        int dirty;

        dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
        sync_write_wait(inode);

        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret < 0)
                return ret;

        dirty = try_flush_caps(inode, NULL, &flush_tid);
        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

        /*
         * only wait on non-file metadata writeback (the mds
         * can recover size and mtime, so we don't need to
         * wait for that)
         */
        if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
                dout("fsync waiting for flush_tid %u\n", flush_tid);
                ret = wait_event_interruptible(ci->i_cap_wq,
                                caps_are_flushed(inode, flush_tid));
        }

        dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
        return ret;
}

/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, int wait)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        unsigned flush_tid;
        int err = 0;
        int dirty;

        dout("write_inode %p wait=%d\n", inode, wait);
        if (wait) {
                dirty = try_flush_caps(inode, NULL, &flush_tid);
                if (dirty)
                        err = wait_event_interruptible(ci->i_cap_wq,
                                caps_are_flushed(inode, flush_tid));
        } else {
                struct ceph_mds_client *mdsc =
                        &ceph_client(inode->i_sb)->mdsc;

                spin_lock(&inode->i_lock);
                if (__ceph_caps_dirty(ci))
                        __cap_delay_requeue_front(mdsc, ci);
                spin_unlock(&inode->i_lock);
        }
        return err;
}

/*
 * After a recovering MDS goes active, we need to resend any caps
 * we were flushing.
 *
 * Caller holds session->s_mutex.
 */
static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
{
        struct ceph_cap_snap *capsnap;

        dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
        list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
                            flushing_item) {
                struct ceph_inode_info *ci = capsnap->ci;
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;

                spin_lock(&inode->i_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n",
                             inode, cap, capsnap);
                        __ceph_flush_snaps(ci, &session);
                        /* __ceph_flush_snaps returns with i_lock held */
                        spin_unlock(&inode->i_lock);
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                        spin_unlock(&inode->i_lock);
                }
        }
}

void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session)
{
        struct ceph_inode_info *ci;

        kick_flushing_capsnaps(mdsc, session);

        dout("kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;
                int delayed = 0;

                spin_lock(&inode->i_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p %s\n", inode,
                             cap, ceph_cap_string(ci->i_flushing_caps));
                        delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
                                             __ceph_caps_used(ci),
                                             __ceph_caps_wanted(ci),
                                             cap->issued | cap->implemented,
                                             ci->i_flushing_caps, NULL);
                        if (delayed) {
                                spin_lock(&inode->i_lock);
                                __cap_delay_requeue(mdsc, ci);
                                spin_unlock(&inode->i_lock);
                        }
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                        spin_unlock(&inode->i_lock);
                }
        }
}


/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_lock.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
        if (got & CEPH_CAP_PIN)
                ci->i_pin_ref++;
        if (got & CEPH_CAP_FILE_RD)
                ci->i_rd_ref++;
        if (got & CEPH_CAP_FILE_CACHE)
                ci->i_rdcache_ref++;
        if (got & CEPH_CAP_FILE_WR)
                ci->i_wr_ref++;
        if (got & CEPH_CAP_FILE_BUFFER) {
                if (ci->i_wrbuffer_ref == 0)
                        igrab(&ci->vfs_inode);
                ci->i_wrbuffer_ref++;
                dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
                     &ci->vfs_inode, ci->i_wrbuffer_ref-1,
                     ci->i_wrbuffer_ref);
        }
}
/*
 * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that caller is responsible for ensuring max_size increases are
 * requested from the MDS.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    int *got, loff_t endoff, int *check_max, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	int ret = 0;
	int have, implemented;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));
	spin_lock(&inode->i_lock);

	/* make sure we _have_ some caps! */
	if (!__ceph_is_any_caps(ci)) {
		dout("get_cap_refs %p no real caps\n", inode);
		*err = -EBADF;
		ret = 1;
		goto out;
	}

	if (need & CEPH_CAP_FILE_WR) {
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_wanted_max_size) {
				*check_max = 1;
				ret = 1;
			}
			goto out;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out;
		}
	}
	have = __ceph_caps_issued(ci, &implemented);

	/*
	 * disallow writes while a truncate is pending
	 */
	if (ci->i_truncate_pending)
		have &= ~CEPH_CAP_FILE_WR;

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			*got = need | (have & want);
			__take_cap_refs(ci, *got);
			ret = 1;
		}
	} else {
		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out:
	spin_unlock(&inode->i_lock);
	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}

/*
 * Check the offset we are writing up to against our current
 * max_size.  If necessary, tell the MDS we want to write to
 * a larger offset.
 */
static void check_max_size(struct inode *inode, loff_t endoff)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int check = 0;

	/* do we need to explicitly request a larger max_size? */
	spin_lock(&inode->i_lock);
	if ((endoff >= ci->i_max_size ||
	     endoff > (inode->i_size << 1)) &&
	    endoff > ci->i_wanted_max_size) {
		dout("write %p at large endoff %llu, req max_size\n",
		     inode, endoff);
		ci->i_wanted_max_size = endoff;
		check = 1;
	}
	spin_unlock(&inode->i_lock);
	if (check)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}

/*
 * Wait for caps, and take cap references.  If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
		  loff_t endoff)
{
	int check_max, ret, err;

retry:
	if (endoff > 0)
		check_max_size(&ci->vfs_inode, endoff);
	check_max = 0;
	err = 0;
	ret = wait_event_interruptible(ci->i_cap_wq,
				       try_get_cap_refs(ci, need, want,
							got, endoff,
							&check_max, &err));
	if (err)
		ret = err;
	if (check_max)
		goto retry;
	return ret;
}
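/*
 * Illustrative usage sketch for ceph_get_caps() (hedged: the real call
 * sites live in the file I/O paths; "pos" and "count" stand in for
 * whatever write range the caller computed):
 *
 *	int got = 0;
 *	int ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR,	// need
 *				CEPH_CAP_FILE_BUFFER,	// want
 *				&got, pos + count);
 *	if (ret < 0)
 *		return ret;
 *	... do the write, buffering it only if got includes Fb ...
 *	ceph_put_cap_refs(ci, got);
 *
 * Every reference taken here must eventually come back through
 * ceph_put_cap_refs() below, or the caps are pinned and never released
 * to the MDS.
 */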
/*
 * Take cap refs.  Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->vfs_inode.i_lock);
	__take_cap_refs(ci, caps);
	spin_unlock(&ci->vfs_inode.i_lock);
}

/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;
	struct ceph_cap_snap *capsnap;

	spin_lock(&inode->i_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wrbuffer_ref == 0) {
			last++;
			put++;
		}
		dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
		     inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			if (!list_empty(&ci->i_cap_snaps)) {
				capsnap = list_first_entry(&ci->i_cap_snaps,
						     struct ceph_cap_snap,
						     ci_item);
				if (capsnap->writing) {
					capsnap->writing = 0;
					flushsnaps =
						__ceph_finish_cap_snap(ci,
								       capsnap);
					wake = 1;
				}
			}
		}
	spin_unlock(&inode->i_lock);

	dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
	     last ? "last" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci);
	if (wake)
		wake_up(&ci->i_cap_wq);
	if (put)
		iput(inode);
}
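/*
 * Illustrative pairing for the snap-aware accounting below (a hedged
 * sketch, not a verbatim call site; in practice the page-dirtying path
 * takes these references and the writeback completion path drops them):
 *
 *	// nr pages were dirtied under snap context snapc:
 *	// i_wrbuffer_ref (and the head or cap_snap dirty_pages count)
 *	// were bumped under i_lock at that point
 *
 *	// ...later, writeback of those nr pages completes:
 *	ceph_put_wrbuffer_cap_refs(ci, nr, snapc);
 *
 * Passing the same @snapc the pages were dirtied under is what lets
 * the function below decide whether the head revision or some older
 * cap_snap just became clean.
 */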
" (capsnap last)" : ""); 2170 } 2171 2172 spin_unlock(&inode->i_lock); 2173 2174 if (last) { 2175 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2176 iput(inode); 2177 } else if (last_snap) { 2178 ceph_flush_snaps(ci); 2179 wake_up(&ci->i_cap_wq); 2180 } 2181 } 2182 2183 /* 2184 * Handle a cap GRANT message from the MDS. (Note that a GRANT may 2185 * actually be a revocation if it specifies a smaller cap set.) 2186 * 2187 * caller holds s_mutex. 2188 * return value: 2189 * 0 - ok 2190 * 1 - check_caps on auth cap only (writeback) 2191 * 2 - check_caps (ack revoke) 2192 */ 2193 static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, 2194 struct ceph_mds_session *session, 2195 struct ceph_cap *cap, 2196 struct ceph_buffer *xattr_buf) 2197 __releases(inode->i_lock) 2198 2199 { 2200 struct ceph_inode_info *ci = ceph_inode(inode); 2201 int mds = session->s_mds; 2202 int seq = le32_to_cpu(grant->seq); 2203 int newcaps = le32_to_cpu(grant->caps); 2204 int issued, implemented, used, wanted, dirty; 2205 u64 size = le64_to_cpu(grant->size); 2206 u64 max_size = le64_to_cpu(grant->max_size); 2207 struct timespec mtime, atime, ctime; 2208 int reply = 0; 2209 int wake = 0; 2210 int writeback = 0; 2211 int revoked_rdcache = 0; 2212 int queue_invalidate = 0; 2213 2214 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", 2215 inode, cap, mds, seq, ceph_cap_string(newcaps)); 2216 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, 2217 inode->i_size); 2218 2219 /* 2220 * If CACHE is being revoked, and we have no dirty buffers, 2221 * try to invalidate (once). (If there are dirty buffers, we 2222 * will invalidate _after_ writeback.) 2223 */ 2224 if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && 2225 !ci->i_wrbuffer_ref) { 2226 if (try_nonblocking_invalidate(inode) == 0) { 2227 revoked_rdcache = 1; 2228 } else { 2229 /* there were locked pages.. invalidate later 2230 in a separate thread. */ 2231 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 2232 queue_invalidate = 1; 2233 ci->i_rdcache_revoking = ci->i_rdcache_gen; 2234 } 2235 } 2236 } 2237 2238 /* side effects now are allowed */ 2239 2240 issued = __ceph_caps_issued(ci, &implemented); 2241 issued |= implemented | __ceph_caps_dirty(ci); 2242 2243 cap->cap_gen = session->s_cap_gen; 2244 2245 __check_cap_issue(ci, cap, newcaps); 2246 2247 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 2248 inode->i_mode = le32_to_cpu(grant->mode); 2249 inode->i_uid = le32_to_cpu(grant->uid); 2250 inode->i_gid = le32_to_cpu(grant->gid); 2251 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 2252 inode->i_uid, inode->i_gid); 2253 } 2254 2255 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 2256 inode->i_nlink = le32_to_cpu(grant->nlink); 2257 2258 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) { 2259 int len = le32_to_cpu(grant->xattr_len); 2260 u64 version = le64_to_cpu(grant->xattr_version); 2261 2262 if (version > ci->i_xattrs.version) { 2263 dout(" got new xattrs v%llu on %p len %d\n", 2264 version, inode, len); 2265 if (ci->i_xattrs.blob) 2266 ceph_buffer_put(ci->i_xattrs.blob); 2267 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); 2268 ci->i_xattrs.version = version; 2269 } 2270 } 2271 2272 /* size/ctime/mtime/atime? 
	ceph_fill_file_size(inode, issued,
			    le32_to_cpu(grant->truncate_seq),
			    le64_to_cpu(grant->truncate_size), size);
	ceph_decode_timespec(&mtime, &grant->mtime);
	ceph_decode_timespec(&atime, &grant->atime);
	ceph_decode_timespec(&ctime, &grant->ctime);
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
			    &atime);

	/* max size increase? */
	if (max_size != ci->i_max_size) {
		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
		ci->i_max_size = max_size;
		if (max_size >= ci->i_wanted_max_size) {
			ci->i_wanted_max_size = 0;  /* reset */
			ci->i_requested_max_size = 0;
		}
		wake = 1;
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		grant->wanted = cpu_to_le32(wanted);
	}

	cap->seq = seq;

	/* file layout may have changed */
	ci->i_layout = grant->layout;

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
			writeback = 1; /* will delay ack */
		else if (dirty & ~newcaps)
			reply = 1;     /* initiate writeback in check_caps */
		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
			   revoked_rdcache)
			reply = 2;     /* send revoke ack in check_caps */
		cap->issued = newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		cap->issued = newcaps;
		cap->implemented |= newcaps; /* add bits only, to
					      * avoid stepping on a
					      * pending revocation */
		wake = 1;
	}

	spin_unlock(&inode->i_lock);
	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (wake)
		wake_up(&ci->i_cap_wq);
	return reply;
}
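/*
 * Worked example for the revocation arithmetic above (illustrative
 * only; cap strings follow ceph_cap_string()): suppose
 *
 *	cap->issued = pAsxLsXsxFsxcrwb	and the message carries
 *	newcaps     = pAsxLsXsxFscr	(Fxwb being revoked)
 *
 * then cap->issued & ~newcaps == Fxwb, so this GRANT is a revocation.
 * If (used & ~newcaps) includes Fb, dirty buffers exist and must be
 * written back before we ack; otherwise, if (dirty & ~newcaps) is
 * nonzero, a metadata flush is kicked off via check_caps (reply = 1);
 * otherwise the revocation can be acked right away (reply = 2),
 * provided any revoked Fc data was not in use or the page cache was
 * already invalidated.
 */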
/*
 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
 * MDS has been safely committed.
 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;
	int drop = 0;
	int i;

	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((dirty & (1 << i)) &&
		    flush_tid == ci->i_cap_flush_tid[i])
			cleaned |= 1 << i;

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	spin_lock(&mdsc->cap_dirty_lock);
	if (ci->i_flushing_caps == 0) {
		list_del_init(&ci->i_flushing_item);
		if (!list_empty(&session->s_cap_flushing))
			dout(" mds%d still flushing cap on %p\n",
			     session->s_mds,
			     &list_entry(session->s_cap_flushing.next,
					 struct ceph_inode_info,
					 i_flushing_item)->vfs_inode);
		mdsc->num_cap_flushing--;
		wake_up(&mdsc->cap_flushing_wq);
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = 1;
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	wake_up(&ci->i_cap_wq);

out:
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}

/*
 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
 * throw away our cap_snap.
 *
 * Caller holds s_mutex.
 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	int drop = 0;

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&inode->i_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			if (capsnap->flush_tid != flush_tid) {
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->flush_tid);
				break;
			}
			WARN_ON(capsnap->dirty_pages || capsnap->writing);
			dout(" removing cap_snap %p follows %lld\n",
			     capsnap, follows);
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
			drop = 1;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}
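/*
 * Worked example for the FLUSH_ACK matching above (illustrative): each
 * dirty cap bit records, in ci->i_cap_flush_tid[], the tid of the
 * flush that last carried it.  If Ax and Fw were flushed as tid 7, and
 * Fw alone was re-flushed as tid 9, an ack for tid 7 reports
 * dirty == AxFw but cleans only Ax; Fw stays in i_flushing_caps until
 * the tid 9 ack arrives.
 */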
/*
 * Handle TRUNC from MDS, indicating file truncation.
 *
 * caller holds s_mutex.
 */
static void handle_cap_trunc(struct inode *inode,
			     struct ceph_mds_caps *trunc,
			     struct ceph_mds_session *session)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(trunc->seq);
	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
	u64 size = le64_to_cpu(trunc->size);
	int implemented = 0;
	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ci, &implemented);
	int queue_trunc = 0;

	issued |= implemented | dirty;

	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
	     inode, mds, seq, truncate_size, truncate_seq);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  truncate_seq, truncate_size, size);
	spin_unlock(&inode->i_lock);

	if (queue_trunc)
		ceph_queue_vmtruncate(inode);
}

/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	struct ceph_cap *cap = NULL, *t;
	struct rb_node *p;
	int remember = 1;

	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
	     inode, ci, mds, mseq);

	spin_lock(&inode->i_lock);

	/* make sure we haven't seen a higher mseq */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		t = rb_entry(p, struct ceph_cap, ci_node);
		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
			dout(" higher mseq on cap from mds%d\n",
			     t->session->s_mds);
			remember = 0;
		}
		if (t->session->s_mds == mds)
			cap = t;
	}

	if (cap) {
		if (remember) {
			/* make note */
			ci->i_cap_exporting_mds = mds;
			ci->i_cap_exporting_mseq = mseq;
			ci->i_cap_exporting_issued = cap->issued;
		}
		__ceph_remove_cap(cap);
	} else {
		WARN_ON(!cap);
	}

	spin_unlock(&inode->i_lock);
}
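/*
 * Illustrative note on the mseq ordering used above and below
 * (assuming ceph_seq_cmp() is the usual wraparound-safe signed compare
 * of u32 sequence numbers):
 *
 *	ceph_seq_cmp(5, 3) > 0           -> mseq 5 is newer than 3
 *	ceph_seq_cmp(2, 0xffffffff) > 0  -> still newer across the wrap
 *
 * So an EXPORT is remembered only if no cap we hold carries a higher
 * migrate_seq, and the IMPORT below clears that remembered state only
 * when its own mseq is higher still.
 */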
/*
 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
 * clean them up.
 *
 * caller holds s_mutex.
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_session *session,
			      void *snaptrace, int snaptrace_len)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned issued = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);

	if (ci->i_cap_exporting_mds >= 0 &&
	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
		     " - cleared exporting from mds%d\n",
		     inode, ci, mds, mseq,
		     ci->i_cap_exporting_mds);
		ci->i_cap_exporting_issued = 0;
		ci->i_cap_exporting_mseq = 0;
		ci->i_cap_exporting_mds = -1;
	} else {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
		     inode, ci, mds, mseq);
	}

	down_write(&mdsc->snap_rwsem);
	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
			       false);
	downgrade_write(&mdsc->snap_rwsem);
	ceph_add_cap(inode, session, cap_id, -1,
		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
		     NULL /* no caps context */);
	try_flush_caps(inode, session, NULL);
	up_read(&mdsc->snap_rwsem);
}
/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->client->sb;
	struct inode *inode;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	int mds = le64_to_cpu(msg->hdr.src.name.num);
	int op;
	u32 seq;
	struct ceph_vino vino;
	u64 cap_id;
	u64 size, max_size;
	u64 tid;
	int check_caps = 0;
	int r;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	cap_id = le64_to_cpu(h->cap_id);
	seq = le32_to_cpu(h->seq);
	size = le64_to_cpu(h->size);
	max_size = le64_to_cpu(h->max_size);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);
	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);
		goto done;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;

	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, session);
		goto done;

	case CEPH_CAP_OP_IMPORT:
		handle_cap_import(mdsc, inode, h, session,
				  msg->middle,
				  le32_to_cpu(h->snap_trace_len));
		check_caps = 1; /* we may have sent a RELEASE to the old auth */
		goto done;
	}

	/* the rest require a cap */
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&inode->i_lock);
		goto done;
	}

	/* note that each of these drops i_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
		r = handle_cap_grant(inode, h, session, cap, msg->middle);
		if (r == 1)
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
					session);
		else if (r == 2)
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_NODELAY,
					session);
		break;

	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;

	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;

	default:
		spin_unlock(&inode->i_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

done:
	mutex_unlock(&session->s_mutex);

	if (check_caps)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
	if (inode)
		iput(inode);
	return;

bad:
	pr_err("ceph_handle_caps: corrupt message\n");
	ceph_msg_dump(msg);
	return;
}

/*
 * Delayed work handler to process end of delayed cap release LRU list.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;
		list_del_init(&ci->i_cap_delay_list);
		spin_unlock(&mdsc->cap_delay_lock);
		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
		ceph_check_caps(ci, flags, NULL);
	}
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		ci = list_first_entry(&mdsc->cap_dirty,
				      struct ceph_inode_info,
				      i_dirty_item);
		inode = igrab(&ci->vfs_inode);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (inode) {
			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
					NULL);
			iput(inode);
		}
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
}
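/*
 * Illustrative caller sketch (hedged; the expected call site is a
 * sync/unmount-style path that wants every dirty cap flush started
 * before it waits for the FLUSH_ACKs):
 *
 *	ceph_flush_dirty_caps(mdsc);	// start flushing everything
 *	// then wait, e.g. on mdsc->cap_flushing_wq, for
 *	// mdsc->num_cap_flushing to reach zero
 *
 * Note the igrab/iput pattern above: holding an inode reference keeps
 * ci stable while cap_dirty_lock is dropped around ceph_check_caps().
 */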
/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;

	spin_lock(&inode->i_lock);
	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
	if (--ci->i_nr_by_mode[fmode] == 0)
		last++;
	spin_unlock(&inode->i_lock);

	if (last && ci->i_vino.snap == CEPH_NOSNAP)
		ceph_check_caps(ci, 0, NULL);
}

/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * drop.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
			      int mds, int drop, int unless, int force)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	struct ceph_mds_request_release *rel = *p;
	int ret = 0;

	dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
	     mds, ceph_cap_string(drop), ceph_cap_string(unless));

	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (cap && __cap_is_valid(cap)) {
		if (force ||
		    ((cap->issued & drop) &&
		     (cap->issued & unless) == 0)) {
			if ((cap->issued & drop) &&
			    (cap->issued & unless) == 0) {
				dout("encode_inode_release %p cap %p %s -> "
				     "%s\n", inode, cap,
				     ceph_cap_string(cap->issued),
				     ceph_cap_string(cap->issued & ~drop));
				cap->issued &= ~drop;
				cap->implemented &= ~drop;
				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
					int wanted = __ceph_caps_wanted(ci);
					dout(" wanted %s -> %s (act %s)\n",
					     ceph_cap_string(cap->mds_wanted),
					     ceph_cap_string(cap->mds_wanted &
							     ~wanted),
					     ceph_cap_string(wanted));
					cap->mds_wanted &= wanted;
				}
			} else {
				dout("encode_inode_release %p cap %p %s"
				     " (force)\n", inode, cap,
				     ceph_cap_string(cap->issued));
			}

			rel->ino = cpu_to_le64(ceph_ino(inode));
			rel->cap_id = cpu_to_le64(cap->cap_id);
			rel->seq = cpu_to_le32(cap->seq);
			rel->issue_seq = cpu_to_le32(cap->issue_seq);
			rel->mseq = cpu_to_le32(cap->mseq);
			rel->caps = cpu_to_le32(cap->issued);
			rel->wanted = cpu_to_le32(cap->mds_wanted);
			rel->dname_len = 0;
			rel->dname_seq = 0;
			*p += sizeof(*rel);
			ret = 1;
		} else {
			dout("encode_inode_release %p cap %p %s\n",
			     inode, cap, ceph_cap_string(cap->issued));
		}
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
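/*
 * Illustrative usage sketch (hedged: in practice the MDS request
 * builder preallocates room for the release records and derives the
 * drop/unless masks from the operation; "p" walks that array):
 *
 *	void *p = ...next free release record...;
 *	int num = 0;
 *
 *	num += ceph_encode_inode_release(&p, inode, mds,
 *					 CEPH_CAP_LINK_SHARED,	// drop
 *					 CEPH_CAP_LINK_EXCL,	// unless
 *					 0);			// force
 *
 * i.e. "drop Ls on this inode, unless we hold Lx"; when it returns 1,
 * one struct ceph_mds_request_release was written and p was advanced
 * past it.
 */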
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
			       int mds, int drop, int unless)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force a record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * release.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}
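/*
 * Companion sketch for ceph_encode_dentry_release() above (hedged: an
 * unlink-style request might release the parent directory's Fs cap and
 * the dentry lease like this):
 *
 *	ret = ceph_encode_dentry_release(&p, dentry, mds,
 *					 CEPH_CAP_FILE_SHARED,	// drop
 *					 CEPH_CAP_FILE_EXCL);	// unless
 *
 * ret == 1 means a release record (with the dentry name and lease seq
 * appended when we hold a lease from that mds) was written; ret == 0
 * means there was nothing worth dropping and no record was emitted.
 */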