#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * from at least one MDS.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */


/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}

const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}

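/*
 * Cap accounting note: throughout this file we maintain the invariant
 *
 *	caps_total_count == caps_use_count + caps_reserve_count +
 *			    caps_avail_count
 *
 * (asserted by the BUG_ONs below).  caps_min_count is a floor on the
 * number of preallocated caps kept around to avoid alloc/free churn;
 * see ceph_put_cap().
 */
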
void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}

int ceph_reserve_caps(struct ceph_mds_client *mdsc,
		      struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);
	int ret = 0;

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap) {
			ret = -ENOMEM;
			goto out_alloc_count;
		}
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	BUG_ON(have + alloc != need);

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	return 0;

out_alloc_count:
	/* we didn't manage to reserve as much as we needed */
	pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
		   ctx, need, have);
	return ret;
}

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}

static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
				struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
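	/* move one cap from the reservation to "in use" */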
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;
	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode));
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}

/*
 * Called under i_ceph_lock.
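 * Caps are keyed by mds id in ci->i_caps; inserting a second cap for
 * the same mds would be a bug (see the BUG() below).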
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
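	 * (Cached pages from an older generation are then considered
	 * stale; see the gen comparison in try_nonblocking_invalidate().)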
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
		ci->i_rdcache_gen++;

	/*
	 * if we are newly issued FILE_SHARED, mark dir not complete; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
		 struct ceph_mds_session *session, u64 cap_id,
		 int fmode, unsigned issued, unsigned wanted,
		 unsigned seq, unsigned mseq, u64 realmino, int flags,
		 struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *new_cap = NULL;
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (new_cap) {
			cap = new_cap;
			new_cap = NULL;
		} else {
			spin_unlock(&ci->i_ceph_lock);
			new_cap = get_cap(mdsc, caps_reservation);
			if (new_cap == NULL)
				return -ENOMEM;
			goto retry;
		}

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* clear out old exporting info?  (i.e. on cap import) */
		if (ci->i_cap_exporting_mds == mds) {
			ci->i_cap_exporting_issued = 0;
			ci->i_cap_exporting_mseq = 0;
			ci->i_cap_exporting_mds = -1;
		}

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else if (new_cap)
		ceph_put_cap(mdsc, new_cap);

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			ceph_get_snap_realm(mdsc, realm);
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
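	 * (__cap_delay_requeue() below is what schedules that check.)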
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH)
		ci->i_auth_cap = cap;
	else if (ci->i_auth_cap == cap) {
		ci->i_auth_cap = NULL;
		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&ci->i_dirty_item)) {
			dout(" moving %p to cap_dirty_migrating\n", inode);
			list_move(&ci->i_dirty_item,
				  &mdsc->cap_dirty_migrating);
		}
		spin_unlock(&mdsc->cap_dirty_lock);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (mseq > cap->mseq)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
	spin_unlock(&ci->i_ceph_lock);
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
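 * If the session's cap list is currently being iterated over
 * (s_cap_iterator is set), leave the cap in place rather than disturb
 * the iteration.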
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct rb_node *p;
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (__cap_is_valid(cap) &&
		    (cap->implemented & ~cap->issued & mask)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int want = 0;
	int mode;
	for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
		if (ci->i_nr_by_mode[mode])
			want |= ceph_caps_for_mode(mode);
	return want;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
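 * This is the union of mds_wanted across all valid caps on the inode.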
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		mds_wanted |= cap->mds_wanted;
	}
	return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;
	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	if (removed)
		ceph_put_cap(mdsc, cap);

	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
		struct ceph_snap_realm *realm = ci->i_snap_realm;
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		ci->i_snap_realm_counter++;
		ci->i_snap_realm = NULL;
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}
	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
			u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			u64 time_warp_seq,
			kuid_t uid, kgid_t gid, umode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.tid = cpu_to_le64(flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
	fc->mode = cpu_to_le32(mode);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}

void __queue_cap_release(struct ceph_mds_session *session,
			 u64 ino, u64 cap_id, u32 migrate_seq,
			 u32 issue_seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;

	spin_lock(&session->s_cap_lock);
	BUG_ON(!session->s_num_cap_releases);
	msg = list_first_entry(&session->s_cap_releases,
			       struct ceph_msg, list_head);

	dout(" adding %llx release to mds%d msg %p (%d left)\n",
	     ino, session->s_mds, msg, session->s_num_cap_releases);

	BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
	head = msg->front.iov_base;
	le32_add_cpu(&head->num, 1);
	item = msg->front.iov_base + msg->front.iov_len;
	item->ino = cpu_to_le64(ino);
	item->cap_id = cpu_to_le64(cap_id);
	item->migrate_seq = cpu_to_le32(migrate_seq);
	item->seq = cpu_to_le32(issue_seq);

	session->s_num_cap_releases--;

	msg->front.iov_len += sizeof(*item);
	if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
		dout(" release msg %p full\n", msg);
		list_move_tail(&msg->list_head, &session->s_cap_releases_done);
	} else {
		dout(" release msg %p at %d/%d (%d)\n", msg,
		     (int)le32_to_cpu(head->num),
		     (int)CEPH_CAPS_PER_RELEASE,
		     (int)msg->front.iov_len);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		struct ceph_mds_session *session = cap->session;

		__queue_cap_release(session, ceph_ino(inode), cap->cap_id,
				    cap->mseq, cap->issue_seq);
		p = rb_next(p);
		__ceph_remove_cap(cap);
	}
}

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
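 * (The message fields are snapshotted into locals while i_ceph_lock
 * is still held; see __send_cap() below.)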
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      unsigned *pflush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 seq, issue_seq, mseq, time_warp_seq, follows;
	u64 size, max_size;
	struct timespec mtime, atime;
	int wake = 0;
	umode_t mode;
	kuid_t uid;
	kgid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	struct ceph_buffer *xattr_blob = NULL;
	int delayed = 0;
	u64 flush_tid = 0;
	int i;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	if (flushing) {
		/*
		 * assign a tid for flush operations so we can avoid
		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
		 * clean type races.  track latest tid for every bit
		 * so we can handle flush AxFw, flush Fw, and have the
		 * first ack clean Ax.
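		 * (i_cap_flush_tid[] records, for each cap bit, the tid
		 * of the most recent flush that covered it.)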
		 */
		flush_tid = ++ci->i_cap_flush_last_tid;
		if (pflush_tid)
			*pflush_tid = flush_tid;
		dout(" cap_flush_tid %d\n", (int)flush_tid);
		for (i = 0; i < CEPH_CAP_BITS; i++)
			if (flushing & (1 << i))
				ci->i_cap_flush_tid[i] = flush_tid;

		follows = ci->i_head_snapc->seq;
	} else {
		follows = 0;
	}

	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	time_warp_seq = ci->i_time_warp_seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_blob = ci->i_xattrs.blob;
		xattr_version = ci->i_xattrs.version;
	}

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, time_warp_seq,
		uid, gid, mode, xattr_version, xattr_blob,
		follows);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Unless @again is true, skip cap_snaps that were already sent to
 * the MDS (i.e., during this session).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
			struct ceph_mds_session **psession,
			int again)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	int mds;
	struct ceph_cap_snap *capsnap;
	u32 mseq;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
						    session->s_mutex */
	u64 next_follows = 0;  /* keep track of how far we've gotten through
				  the i_cap_snaps list, and skip these entries
				  next time around to avoid an infinite loop */

	if (psession)
		session = *psession;

	dout("__flush_snaps %p\n", inode);
retry:
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/* avoid an infinite loop after retry */
		if (capsnap->follows < next_follows)
			continue;
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/*
		 * if cap writeback already occurred, we should have dropped
		 * the capsnap in ceph_put_wrbuffer_cap_refs.
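		 * A capsnap with dirty == 0 here would therefore be a bug.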
		 */
		BUG_ON(capsnap->dirty == 0);

		/* pick mds, take s_mutex */
		if (ci->i_auth_cap == NULL) {
			dout("no auth cap (migrating?), doing nothing\n");
			goto out;
		}

		/* only flush each capsnap once */
		if (!again && !list_empty(&capsnap->flushing_item)) {
			dout("already flushed %p, skipping\n", capsnap);
			continue;
		}

		mds = ci->i_auth_cap->session->s_mds;
		mseq = ci->i_auth_cap->mseq;

		if (session && session->s_mds != mds) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			session = NULL;
		}
		if (!session) {
			spin_unlock(&ci->i_ceph_lock);
			mutex_lock(&mdsc->mutex);
			session = __ceph_lookup_mds_session(mdsc, mds);
			mutex_unlock(&mdsc->mutex);
			if (session) {
				dout("inverting session/ino locks on %p\n",
				     session);
				mutex_lock(&session->s_mutex);
			}
			/*
			 * if session == NULL, we raced against a cap
			 * deletion or migration.  retry, and we'll
			 * get a better @mds value next time.
			 */
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}

		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
		atomic_inc(&capsnap->nref);
		if (!list_empty(&capsnap->flushing_item))
			list_del_init(&capsnap->flushing_item);
		list_add_tail(&capsnap->flushing_item,
			      &session->s_cap_snaps_flushing);
		spin_unlock(&ci->i_ceph_lock);

		dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
		     inode, capsnap, capsnap->follows, capsnap->flush_tid);
		send_cap_msg(session, ceph_vino(inode).ino, 0,
			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
			     capsnap->size, 0,
			     &capsnap->mtime, &capsnap->atime,
			     capsnap->time_warp_seq,
			     capsnap->uid, capsnap->gid, capsnap->mode,
			     capsnap->xattr_version, capsnap->xattr_blob,
			     capsnap->follows);

		next_follows = capsnap->follows + 1;
		ceph_put_cap_snap(capsnap);

		spin_lock(&ci->i_ceph_lock);
		goto retry;
	}

	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);

out:
	if (psession)
		*psession = session;
	else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
	spin_lock(&ci->i_ceph_lock);
	__ceph_flush_snaps(ci, NULL, 0);
	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
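 * (The returned value is a mask of I_DIRTY_SYNC and/or
 * I_DIRTY_DATASYNC, possibly 0.)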
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		if (!ci->i_head_snapc)
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		if (ci->i_auth_cap)
			list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		else
			list_add(&ci->i_dirty_item,
				 &mdsc->cap_dirty_migrating);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}

/*
 * Add dirty inode to the flushing list.  Assign it a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
		dout(" inode %p now flushing seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	} else {
		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		dout(" inode %p now flushing (more) seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	return flushing;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int tried_invalidate = 0;
	int delayed = 0, sent = 0, force_requeue = 0, num;
	int queue_invalidate = 0;
	int is_delayed = flags & CHECK_CAPS_NODELAY;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	/* flush snaps first time around only */
	if (!list_empty(&ci->i_cap_snaps))
		__ceph_flush_snaps(ci, &session, 0);
	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	want = file_wanted | used;
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	retain = want | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (want) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    ci->i_wrbuffer_ref == 0 &&          /* no dirty pages... */
	    inode->i_data.nrpages &&            /* have cached pages */
	    (file_wanted == 0 ||                /* no open files */
	     (revoking & (CEPH_CAP_FILE_CACHE|
			  CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = 1;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = 1;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap_used),
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
		    ci->i_dirty_caps) {
			dout("flushing dirty caps\n");
			goto ack;
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}
		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
			flushing = __mark_caps_flushing(inode, session);
		else
			flushing = 0;

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
				      want, retain, flushing, NULL);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = 1;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}

/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
			  unsigned *flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int unlock_session = session ? 0 : 1;
	int flushing = 0;

retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session) {
			spin_unlock(&ci->i_ceph_lock);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		BUG_ON(session != cap->session);
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
			goto out;

		flushing = __mark_caps_flushing(inode, session);

		/* __send_cap drops i_ceph_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
				     cap->issued | cap->implemented, flushing,
				     flush_tid);
		if (!delayed)
			goto out_unlocked;

		spin_lock(&ci->i_ceph_lock);
		__cap_delay_requeue(mdsc, ci);
	}
out:
	spin_unlock(&ci->i_ceph_lock);
out_unlocked:
	if (session && unlock_session)
		mutex_unlock(&session->s_mutex);
	return flushing;
}

/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, unsigned tid)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i, ret = 1;

	spin_lock(&ci->i_ceph_lock);
	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((ci->i_flushing_caps & (1 << i)) &&
		    ci->i_cap_flush_tid[i] <= tid) {
			/* still flushing this bit */
			ret = 0;
			break;
		}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
static void sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */
	req = list_entry(head->prev, struct ceph_osd_request,
			 r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		spin_lock(&ci->i_unsafe_lock);
		ceph_osdc_put_request(req);

		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_entry(head->next, struct ceph_osd_request,
				 r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}

int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
" datasync" : ""); 1845 sync_write_wait(inode); 1846 1847 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 1848 if (ret < 0) 1849 return ret; 1850 mutex_lock(&inode->i_mutex); 1851 1852 dirty = try_flush_caps(inode, NULL, &flush_tid); 1853 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); 1854 1855 /* 1856 * only wait on non-file metadata writeback (the mds 1857 * can recover size and mtime, so we don't need to 1858 * wait for that) 1859 */ 1860 if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) { 1861 dout("fsync waiting for flush_tid %u\n", flush_tid); 1862 ret = wait_event_interruptible(ci->i_cap_wq, 1863 caps_are_flushed(inode, flush_tid)); 1864 } 1865 1866 dout("fsync %p%s done\n", inode, datasync ? " datasync" : ""); 1867 mutex_unlock(&inode->i_mutex); 1868 return ret; 1869 } 1870 1871 /* 1872 * Flush any dirty caps back to the mds. If we aren't asked to wait, 1873 * queue inode for flush but don't do so immediately, because we can 1874 * get by with fewer MDS messages if we wait for data writeback to 1875 * complete first. 1876 */ 1877 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) 1878 { 1879 struct ceph_inode_info *ci = ceph_inode(inode); 1880 unsigned flush_tid; 1881 int err = 0; 1882 int dirty; 1883 int wait = wbc->sync_mode == WB_SYNC_ALL; 1884 1885 dout("write_inode %p wait=%d\n", inode, wait); 1886 if (wait) { 1887 dirty = try_flush_caps(inode, NULL, &flush_tid); 1888 if (dirty) 1889 err = wait_event_interruptible(ci->i_cap_wq, 1890 caps_are_flushed(inode, flush_tid)); 1891 } else { 1892 struct ceph_mds_client *mdsc = 1893 ceph_sb_to_client(inode->i_sb)->mdsc; 1894 1895 spin_lock(&ci->i_ceph_lock); 1896 if (__ceph_caps_dirty(ci)) 1897 __cap_delay_requeue_front(mdsc, ci); 1898 spin_unlock(&ci->i_ceph_lock); 1899 } 1900 return err; 1901 } 1902 1903 /* 1904 * After a recovering MDS goes active, we need to resend any caps 1905 * we were flushing. 1906 * 1907 * Caller holds session->s_mutex. 
1908 */ 1909 static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc, 1910 struct ceph_mds_session *session) 1911 { 1912 struct ceph_cap_snap *capsnap; 1913 1914 dout("kick_flushing_capsnaps mds%d\n", session->s_mds); 1915 list_for_each_entry(capsnap, &session->s_cap_snaps_flushing, 1916 flushing_item) { 1917 struct ceph_inode_info *ci = capsnap->ci; 1918 struct inode *inode = &ci->vfs_inode; 1919 struct ceph_cap *cap; 1920 1921 spin_lock(&ci->i_ceph_lock); 1922 cap = ci->i_auth_cap; 1923 if (cap && cap->session == session) { 1924 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, 1925 cap, capsnap); 1926 __ceph_flush_snaps(ci, &session, 1); 1927 } else { 1928 pr_err("%p auth cap %p not mds%d ???\n", inode, 1929 cap, session->s_mds); 1930 } 1931 spin_unlock(&ci->i_ceph_lock); 1932 } 1933 } 1934 1935 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, 1936 struct ceph_mds_session *session) 1937 { 1938 struct ceph_inode_info *ci; 1939 1940 kick_flushing_capsnaps(mdsc, session); 1941 1942 dout("kick_flushing_caps mds%d\n", session->s_mds); 1943 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 1944 struct inode *inode = &ci->vfs_inode; 1945 struct ceph_cap *cap; 1946 int delayed = 0; 1947 1948 spin_lock(&ci->i_ceph_lock); 1949 cap = ci->i_auth_cap; 1950 if (cap && cap->session == session) { 1951 dout("kick_flushing_caps %p cap %p %s\n", inode, 1952 cap, ceph_cap_string(ci->i_flushing_caps)); 1953 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, 1954 __ceph_caps_used(ci), 1955 __ceph_caps_wanted(ci), 1956 cap->issued | cap->implemented, 1957 ci->i_flushing_caps, NULL); 1958 if (delayed) { 1959 spin_lock(&ci->i_ceph_lock); 1960 __cap_delay_requeue(mdsc, ci); 1961 spin_unlock(&ci->i_ceph_lock); 1962 } 1963 } else { 1964 pr_err("%p auth cap %p not mds%d ???\n", inode, 1965 cap, session->s_mds); 1966 spin_unlock(&ci->i_ceph_lock); 1967 } 1968 } 1969 } 1970 1971 static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, 1972 struct ceph_mds_session *session, 1973 struct inode *inode) 1974 { 1975 struct ceph_inode_info *ci = ceph_inode(inode); 1976 struct ceph_cap *cap; 1977 int delayed = 0; 1978 1979 spin_lock(&ci->i_ceph_lock); 1980 cap = ci->i_auth_cap; 1981 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode, 1982 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq); 1983 __ceph_flush_snaps(ci, &session, 1); 1984 if (ci->i_flushing_caps) { 1985 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, 1986 __ceph_caps_used(ci), 1987 __ceph_caps_wanted(ci), 1988 cap->issued | cap->implemented, 1989 ci->i_flushing_caps, NULL); 1990 if (delayed) { 1991 spin_lock(&ci->i_ceph_lock); 1992 __cap_delay_requeue(mdsc, ci); 1993 spin_unlock(&ci->i_ceph_lock); 1994 } 1995 } else { 1996 spin_unlock(&ci->i_ceph_lock); 1997 } 1998 } 1999 2000 2001 /* 2002 * Take references to capabilities we hold, so that we don't release 2003 * them to the MDS prematurely. 2004 * 2005 * Protected by i_ceph_lock. 
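 *
 * Illustrative use (ceph_get_cap_refs() below is exactly this pattern):
 *
 *	spin_lock(&ci->i_ceph_lock);
 *	__take_cap_refs(ci, CEPH_CAP_FILE_CACHE);	// e.g. pin Fc
 *	spin_unlock(&ci->i_ceph_lock);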
2006 */ 2007 static void __take_cap_refs(struct ceph_inode_info *ci, int got) 2008 { 2009 if (got & CEPH_CAP_PIN) 2010 ci->i_pin_ref++; 2011 if (got & CEPH_CAP_FILE_RD) 2012 ci->i_rd_ref++; 2013 if (got & CEPH_CAP_FILE_CACHE) 2014 ci->i_rdcache_ref++; 2015 if (got & CEPH_CAP_FILE_WR) 2016 ci->i_wr_ref++; 2017 if (got & CEPH_CAP_FILE_BUFFER) { 2018 if (ci->i_wb_ref == 0) 2019 ihold(&ci->vfs_inode); 2020 ci->i_wb_ref++; 2021 dout("__take_cap_refs %p wb %d -> %d (?)\n", 2022 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); 2023 } 2024 } 2025 2026 /* 2027 * Try to grab cap references. Specify those refs we @want, and the 2028 * minimal set we @need. Also include the larger offset we are writing 2029 * to (when applicable), and check against max_size here as well. 2030 * Note that caller is responsible for ensuring max_size increases are 2031 * requested from the MDS. 2032 */ 2033 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, 2034 int *got, loff_t endoff, int *check_max, int *err) 2035 { 2036 struct inode *inode = &ci->vfs_inode; 2037 int ret = 0; 2038 int have, implemented; 2039 int file_wanted; 2040 2041 dout("get_cap_refs %p need %s want %s\n", inode, 2042 ceph_cap_string(need), ceph_cap_string(want)); 2043 spin_lock(&ci->i_ceph_lock); 2044 2045 /* make sure file is actually open */ 2046 file_wanted = __ceph_caps_file_wanted(ci); 2047 if ((file_wanted & need) == 0) { 2048 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n", 2049 ceph_cap_string(need), ceph_cap_string(file_wanted)); 2050 *err = -EBADF; 2051 ret = 1; 2052 goto out; 2053 } 2054 2055 /* finish pending truncate */ 2056 while (ci->i_truncate_pending) { 2057 spin_unlock(&ci->i_ceph_lock); 2058 __ceph_do_pending_vmtruncate(inode, !(need & CEPH_CAP_FILE_WR)); 2059 spin_lock(&ci->i_ceph_lock); 2060 } 2061 2062 if (need & CEPH_CAP_FILE_WR) { 2063 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { 2064 dout("get_cap_refs %p endoff %llu > maxsize %llu\n", 2065 inode, endoff, ci->i_max_size); 2066 if (endoff > ci->i_wanted_max_size) { 2067 *check_max = 1; 2068 ret = 1; 2069 } 2070 goto out; 2071 } 2072 /* 2073 * If a sync write is in progress, we must wait, so that we 2074 * can get a final snapshot value for size+mtime. 2075 */ 2076 if (__ceph_have_pending_cap_snap(ci)) { 2077 dout("get_cap_refs %p cap_snap_pending\n", inode); 2078 goto out; 2079 } 2080 } 2081 have = __ceph_caps_issued(ci, &implemented); 2082 2083 if ((have & need) == need) { 2084 /* 2085 * Look at (implemented & ~have & not) so that we keep waiting 2086 * on transition from wanted -> needed caps. This is needed 2087 * for WRBUFFER|WR -> WR to avoid a new WR sync write from 2088 * going before a prior buffered writeback happens. 2089 */ 2090 int not = want & ~(have & need); 2091 int revoking = implemented & ~have; 2092 dout("get_cap_refs %p have %s but not %s (revoking %s)\n", 2093 inode, ceph_cap_string(have), ceph_cap_string(not), 2094 ceph_cap_string(revoking)); 2095 if ((revoking & not) == 0) { 2096 *got = need | (have & want); 2097 __take_cap_refs(ci, *got); 2098 ret = 1; 2099 } 2100 } else { 2101 dout("get_cap_refs %p have %s needed %s\n", inode, 2102 ceph_cap_string(have), ceph_cap_string(need)); 2103 } 2104 out: 2105 spin_unlock(&ci->i_ceph_lock); 2106 dout("get_cap_refs %p ret %d got %s\n", inode, 2107 ret, ceph_cap_string(*got)); 2108 return ret; 2109 } 2110 2111 /* 2112 * Check the offset we are writing up to against our current 2113 * max_size. If necessary, tell the MDS we want to write to 2114 * a larger offset. 
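 *
 * ceph_get_caps() below calls this automatically when the caller passes
 * a positive endoff, e.g. (hypothetical write path):
 *
 *	ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
 *		      &got, pos + len);		// implies check_max_size()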
2115 */ 2116 static void check_max_size(struct inode *inode, loff_t endoff) 2117 { 2118 struct ceph_inode_info *ci = ceph_inode(inode); 2119 int check = 0; 2120 2121 /* do we need to explicitly request a larger max_size? */ 2122 spin_lock(&ci->i_ceph_lock); 2123 if ((endoff >= ci->i_max_size || 2124 endoff > (inode->i_size << 1)) && 2125 endoff > ci->i_wanted_max_size) { 2126 dout("write %p at large endoff %llu, req max_size\n", 2127 inode, endoff); 2128 ci->i_wanted_max_size = endoff; 2129 check = 1; 2130 } 2131 spin_unlock(&ci->i_ceph_lock); 2132 if (check) 2133 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2134 } 2135 2136 /* 2137 * Wait for caps, and take cap references. If we can't get a WR cap 2138 * due to a small max_size, make sure we check_max_size (and possibly 2139 * ask the mds) so we don't get hung up indefinitely. 2140 */ 2141 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got, 2142 loff_t endoff) 2143 { 2144 int check_max, ret, err; 2145 2146 retry: 2147 if (endoff > 0) 2148 check_max_size(&ci->vfs_inode, endoff); 2149 check_max = 0; 2150 err = 0; 2151 ret = wait_event_interruptible(ci->i_cap_wq, 2152 try_get_cap_refs(ci, need, want, 2153 got, endoff, 2154 &check_max, &err)); 2155 if (err) 2156 ret = err; 2157 if (check_max) 2158 goto retry; 2159 return ret; 2160 } 2161 2162 /* 2163 * Take cap refs. Caller must already know we hold at least one ref 2164 * on the caps in question or we don't know this is safe. 2165 */ 2166 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) 2167 { 2168 spin_lock(&ci->i_ceph_lock); 2169 __take_cap_refs(ci, caps); 2170 spin_unlock(&ci->i_ceph_lock); 2171 } 2172 2173 /* 2174 * Release cap refs. 2175 * 2176 * If we released the last ref on any given cap, call ceph_check_caps 2177 * to release (or schedule a release). 2178 * 2179 * If we are releasing a WR cap (from a sync write), finalize any affected 2180 * cap_snap, and wake up any waiters. 2181 */ 2182 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) 2183 { 2184 struct inode *inode = &ci->vfs_inode; 2185 int last = 0, put = 0, flushsnaps = 0, wake = 0; 2186 struct ceph_cap_snap *capsnap; 2187 2188 spin_lock(&ci->i_ceph_lock); 2189 if (had & CEPH_CAP_PIN) 2190 --ci->i_pin_ref; 2191 if (had & CEPH_CAP_FILE_RD) 2192 if (--ci->i_rd_ref == 0) 2193 last++; 2194 if (had & CEPH_CAP_FILE_CACHE) 2195 if (--ci->i_rdcache_ref == 0) 2196 last++; 2197 if (had & CEPH_CAP_FILE_BUFFER) { 2198 if (--ci->i_wb_ref == 0) { 2199 last++; 2200 put++; 2201 } 2202 dout("put_cap_refs %p wb %d -> %d (?)\n", 2203 inode, ci->i_wb_ref+1, ci->i_wb_ref); 2204 } 2205 if (had & CEPH_CAP_FILE_WR) 2206 if (--ci->i_wr_ref == 0) { 2207 last++; 2208 if (!list_empty(&ci->i_cap_snaps)) { 2209 capsnap = list_first_entry(&ci->i_cap_snaps, 2210 struct ceph_cap_snap, 2211 ci_item); 2212 if (capsnap->writing) { 2213 capsnap->writing = 0; 2214 flushsnaps = 2215 __ceph_finish_cap_snap(ci, 2216 capsnap); 2217 wake = 1; 2218 } 2219 } 2220 } 2221 spin_unlock(&ci->i_ceph_lock); 2222 2223 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), 2224 last ? " last" : "", put ? " put" : ""); 2225 2226 if (last && !flushsnaps) 2227 ceph_check_caps(ci, 0, NULL); 2228 else if (flushsnaps) 2229 ceph_flush_snaps(ci); 2230 if (wake) 2231 wake_up_all(&ci->i_cap_wq); 2232 if (put) 2233 iput(inode); 2234 } 2235 2236 /* 2237 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap 2238 * context. Adjust per-snap dirty page accounting as appropriate. 
2239 * Once all dirty data for a cap_snap is flushed, flush snapped file 2240 * metadata back to the MDS. If we dropped the last ref, call 2241 * ceph_check_caps. 2242 */ 2243 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, 2244 struct ceph_snap_context *snapc) 2245 { 2246 struct inode *inode = &ci->vfs_inode; 2247 int last = 0; 2248 int complete_capsnap = 0; 2249 int drop_capsnap = 0; 2250 int found = 0; 2251 struct ceph_cap_snap *capsnap = NULL; 2252 2253 spin_lock(&ci->i_ceph_lock); 2254 ci->i_wrbuffer_ref -= nr; 2255 last = !ci->i_wrbuffer_ref; 2256 2257 if (ci->i_head_snapc == snapc) { 2258 ci->i_wrbuffer_ref_head -= nr; 2259 if (ci->i_wrbuffer_ref_head == 0 && 2260 ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) { 2261 BUG_ON(!ci->i_head_snapc); 2262 ceph_put_snap_context(ci->i_head_snapc); 2263 ci->i_head_snapc = NULL; 2264 } 2265 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n", 2266 inode, 2267 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, 2268 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 2269 last ? " LAST" : ""); 2270 } else { 2271 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 2272 if (capsnap->context == snapc) { 2273 found = 1; 2274 break; 2275 } 2276 } 2277 BUG_ON(!found); 2278 capsnap->dirty_pages -= nr; 2279 if (capsnap->dirty_pages == 0) { 2280 complete_capsnap = 1; 2281 if (capsnap->dirty == 0) 2282 /* cap writeback completed before we created 2283 * the cap_snap; no FLUSHSNAP is needed */ 2284 drop_capsnap = 1; 2285 } 2286 dout("put_wrbuffer_cap_refs on %p cap_snap %p " 2287 " snap %lld %d/%d -> %d/%d %s%s%s\n", 2288 inode, capsnap, capsnap->context->seq, 2289 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 2290 ci->i_wrbuffer_ref, capsnap->dirty_pages, 2291 last ? " (wrbuffer last)" : "", 2292 complete_capsnap ? " (complete capsnap)" : "", 2293 drop_capsnap ? " (drop capsnap)" : ""); 2294 if (drop_capsnap) { 2295 ceph_put_snap_context(capsnap->context); 2296 list_del(&capsnap->ci_item); 2297 list_del(&capsnap->flushing_item); 2298 ceph_put_cap_snap(capsnap); 2299 } 2300 } 2301 2302 spin_unlock(&ci->i_ceph_lock); 2303 2304 if (last) { 2305 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2306 iput(inode); 2307 } else if (complete_capsnap) { 2308 ceph_flush_snaps(ci); 2309 wake_up_all(&ci->i_cap_wq); 2310 } 2311 if (drop_capsnap) 2312 iput(inode); 2313 } 2314 2315 /* 2316 * Handle a cap GRANT message from the MDS. (Note that a GRANT may 2317 * actually be a revocation if it specifies a smaller cap set.) 2318 * 2319 * caller holds s_mutex and i_ceph_lock, we drop both. 
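 *
 * Example (illustrative): if we hold Fcb (cache+buffer) and another
 * client opens the file for write, the MDS may send a GRANT that no
 * longer includes those bits; revoking Fb while it is in use triggers
 * writeback below, and the ack is delayed until the data is flushed.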
2320  *
2321  * check_caps disposition on return (the function itself returns void):
2322  *   0 - ok, no further cap check needed
2323  *   1 - check_caps on auth cap only (writeback)
2324  *   2 - check_caps on all caps (ack revoke)
2325  */
2326 static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2327 			     struct ceph_mds_session *session,
2328 			     struct ceph_cap *cap,
2329 			     struct ceph_buffer *xattr_buf)
2330 	__releases(ci->i_ceph_lock)
2331 {
2332 	struct ceph_inode_info *ci = ceph_inode(inode);
2333 	int mds = session->s_mds;
2334 	int seq = le32_to_cpu(grant->seq);
2335 	int newcaps = le32_to_cpu(grant->caps);
2336 	int issued, implemented, used, wanted, dirty;
2337 	u64 size = le64_to_cpu(grant->size);
2338 	u64 max_size = le64_to_cpu(grant->max_size);
2339 	struct timespec mtime, atime, ctime;
2340 	int check_caps = 0;
2341 	int wake = 0;
2342 	int writeback = 0;
2343 	int revoked_rdcache = 0;
2344 	int queue_invalidate = 0;
2345 
2346 	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2347 	     inode, cap, mds, seq, ceph_cap_string(newcaps));
2348 	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2349 	     inode->i_size);
2350 
2351 	/*
2352 	 * If CACHE is being revoked, and we have no dirty buffers,
2353 	 * try to invalidate (once).  (If there are dirty buffers, we
2354 	 * will invalidate _after_ writeback.)
2355 	 */
2356 	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2357 	    (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2358 	    !ci->i_wrbuffer_ref) {
2359 		if (try_nonblocking_invalidate(inode) == 0) {
2360 			revoked_rdcache = 1;
2361 		} else {
2362 			/* there were locked pages.. invalidate later
2363 			   in a separate thread. */
2364 			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2365 				queue_invalidate = 1;
2366 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
2367 			}
2368 		}
2369 	}
2370 
2371 	/* side effects now are allowed */
2372 
2373 	issued = __ceph_caps_issued(ci, &implemented);
2374 	issued |= implemented | __ceph_caps_dirty(ci);
2375 
2376 	cap->cap_gen = session->s_cap_gen;
2377 
2378 	__check_cap_issue(ci, cap, newcaps);
2379 
2380 	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2381 		inode->i_mode = le32_to_cpu(grant->mode);
2382 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2383 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2384 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2385 		     from_kuid(&init_user_ns, inode->i_uid),
2386 		     from_kgid(&init_user_ns, inode->i_gid));
2387 	}
2388 
2389 	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2390 		set_nlink(inode, le32_to_cpu(grant->nlink));
2391 
2392 	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2393 		int len = le32_to_cpu(grant->xattr_len);
2394 		u64 version = le64_to_cpu(grant->xattr_version);
2395 
2396 		if (version > ci->i_xattrs.version) {
2397 			dout(" got new xattrs v%llu on %p len %d\n",
2398 			     version, inode, len);
2399 			if (ci->i_xattrs.blob)
2400 				ceph_buffer_put(ci->i_xattrs.blob);
2401 			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2402 			ci->i_xattrs.version = version;
2403 		}
2404 	}
2405 
2406 	/* size/ctime/mtime/atime? */
2407 	ceph_fill_file_size(inode, issued,
2408 			    le32_to_cpu(grant->truncate_seq),
2409 			    le64_to_cpu(grant->truncate_size), size);
2410 	ceph_decode_timespec(&mtime, &grant->mtime);
2411 	ceph_decode_timespec(&atime, &grant->atime);
2412 	ceph_decode_timespec(&ctime, &grant->ctime);
2413 	ceph_fill_file_time(inode, issued,
2414 			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2415 			    &atime);
2416 
2417 	/* max size increase?
*/ 2418 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) { 2419 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size); 2420 ci->i_max_size = max_size; 2421 if (max_size >= ci->i_wanted_max_size) { 2422 ci->i_wanted_max_size = 0; /* reset */ 2423 ci->i_requested_max_size = 0; 2424 } 2425 wake = 1; 2426 } 2427 2428 /* check cap bits */ 2429 wanted = __ceph_caps_wanted(ci); 2430 used = __ceph_caps_used(ci); 2431 dirty = __ceph_caps_dirty(ci); 2432 dout(" my wanted = %s, used = %s, dirty %s\n", 2433 ceph_cap_string(wanted), 2434 ceph_cap_string(used), 2435 ceph_cap_string(dirty)); 2436 if (wanted != le32_to_cpu(grant->wanted)) { 2437 dout("mds wanted %s -> %s\n", 2438 ceph_cap_string(le32_to_cpu(grant->wanted)), 2439 ceph_cap_string(wanted)); 2440 /* imported cap may not have correct mds_wanted */ 2441 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) 2442 check_caps = 1; 2443 } 2444 2445 cap->seq = seq; 2446 2447 /* file layout may have changed */ 2448 ci->i_layout = grant->layout; 2449 2450 /* revocation, grant, or no-op? */ 2451 if (cap->issued & ~newcaps) { 2452 int revoking = cap->issued & ~newcaps; 2453 2454 dout("revocation: %s -> %s (revoking %s)\n", 2455 ceph_cap_string(cap->issued), 2456 ceph_cap_string(newcaps), 2457 ceph_cap_string(revoking)); 2458 if (revoking & used & CEPH_CAP_FILE_BUFFER) 2459 writeback = 1; /* initiate writeback; will delay ack */ 2460 else if (revoking == CEPH_CAP_FILE_CACHE && 2461 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 && 2462 queue_invalidate) 2463 ; /* do nothing yet, invalidation will be queued */ 2464 else if (cap == ci->i_auth_cap) 2465 check_caps = 1; /* check auth cap only */ 2466 else 2467 check_caps = 2; /* check all caps */ 2468 cap->issued = newcaps; 2469 cap->implemented |= newcaps; 2470 } else if (cap->issued == newcaps) { 2471 dout("caps unchanged: %s -> %s\n", 2472 ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); 2473 } else { 2474 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued), 2475 ceph_cap_string(newcaps)); 2476 cap->issued = newcaps; 2477 cap->implemented |= newcaps; /* add bits only, to 2478 * avoid stepping on a 2479 * pending revocation */ 2480 wake = 1; 2481 } 2482 BUG_ON(cap->issued & ~cap->implemented); 2483 2484 spin_unlock(&ci->i_ceph_lock); 2485 if (writeback) 2486 /* 2487 * queue inode for writeback: we can't actually call 2488 * filemap_write_and_wait, etc. from message handler 2489 * context. 2490 */ 2491 ceph_queue_writeback(inode); 2492 if (queue_invalidate) 2493 ceph_queue_invalidate(inode); 2494 if (wake) 2495 wake_up_all(&ci->i_cap_wq); 2496 2497 if (check_caps == 1) 2498 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, 2499 session); 2500 else if (check_caps == 2) 2501 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session); 2502 else 2503 mutex_unlock(&session->s_mutex); 2504 } 2505 2506 /* 2507 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the 2508 * MDS has been safely committed. 
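 *
 * Sketch of the dirty-metadata life cycle this message completes:
 *
 *	dirty       - cap bit set in i_dirty_caps
 *	-> flushing - __mark_caps_flushing() records a tid per cap bit in
 *	              i_cap_flush_tid[] and __send_cap() sends FLUSH
 *	-> clean    - this FLUSH_ACK matches the tid and clears the bit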
2509  */
2510 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2511 				 struct ceph_mds_caps *m,
2512 				 struct ceph_mds_session *session,
2513 				 struct ceph_cap *cap)
2514 	__releases(ci->i_ceph_lock)
2515 {
2516 	struct ceph_inode_info *ci = ceph_inode(inode);
2517 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2518 	unsigned seq = le32_to_cpu(m->seq);
2519 	int dirty = le32_to_cpu(m->dirty);
2520 	int cleaned = 0;
2521 	int drop = 0;
2522 	int i;
2523 
2524 	for (i = 0; i < CEPH_CAP_BITS; i++)
2525 		if ((dirty & (1 << i)) &&
2526 		    flush_tid == ci->i_cap_flush_tid[i])
2527 			cleaned |= 1 << i;
2528 
2529 	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2530 	     " flushing %s -> %s\n",
2531 	     inode, session->s_mds, seq, ceph_cap_string(dirty),
2532 	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2533 	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2534 
2535 	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2536 		goto out;
2537 
2538 	ci->i_flushing_caps &= ~cleaned;
2539 
2540 	spin_lock(&mdsc->cap_dirty_lock);
2541 	if (ci->i_flushing_caps == 0) {
2542 		list_del_init(&ci->i_flushing_item);
2543 		if (!list_empty(&session->s_cap_flushing))
2544 			dout(" mds%d still flushing cap on %p\n",
2545 			     session->s_mds,
2546 			     &list_entry(session->s_cap_flushing.next,
2547 					 struct ceph_inode_info,
2548 					 i_flushing_item)->vfs_inode);
2549 		mdsc->num_cap_flushing--;
2550 		wake_up_all(&mdsc->cap_flushing_wq);
2551 		dout(" inode %p now !flushing\n", inode);
2552 
2553 		if (ci->i_dirty_caps == 0) {
2554 			dout(" inode %p now clean\n", inode);
2555 			BUG_ON(!list_empty(&ci->i_dirty_item));
2556 			drop = 1;
2557 			if (ci->i_wrbuffer_ref_head == 0) {
2558 				BUG_ON(!ci->i_head_snapc);
2559 				ceph_put_snap_context(ci->i_head_snapc);
2560 				ci->i_head_snapc = NULL;
2561 			}
2562 		} else {
2563 			BUG_ON(list_empty(&ci->i_dirty_item));
2564 		}
2565 	}
2566 	spin_unlock(&mdsc->cap_dirty_lock);
2567 	wake_up_all(&ci->i_cap_wq);
2568 
2569 out:
2570 	spin_unlock(&ci->i_ceph_lock);
2571 	if (drop)
2572 		iput(inode);
2573 }
2574 
2575 /*
2576  * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
2577  * throw away our cap_snap.
2578  *
2579  * Caller holds s_mutex.
2580  */
2581 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2582 				     struct ceph_mds_caps *m,
2583 				     struct ceph_mds_session *session)
2584 {
2585 	struct ceph_inode_info *ci = ceph_inode(inode);
2586 	u64 follows = le64_to_cpu(m->snap_follows);
2587 	struct ceph_cap_snap *capsnap;
2588 	int drop = 0;
2589 
2590 	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2591 	     inode, ci, session->s_mds, follows);
2592 
2593 	spin_lock(&ci->i_ceph_lock);
2594 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2595 		if (capsnap->follows == follows) {
2596 			if (capsnap->flush_tid != flush_tid) {
2597 				dout(" cap_snap %p follows %lld tid %lld !="
2598 				     " %lld\n", capsnap, follows,
2599 				     flush_tid, capsnap->flush_tid);
2600 				break;
2601 			}
2602 			WARN_ON(capsnap->dirty_pages || capsnap->writing);
2603 			dout(" removing %p cap_snap %p follows %lld\n",
2604 			     inode, capsnap, follows);
2605 			ceph_put_snap_context(capsnap->context);
2606 			list_del(&capsnap->ci_item);
2607 			list_del(&capsnap->flushing_item);
2608 			ceph_put_cap_snap(capsnap);
2609 			drop = 1;
2610 			break;
2611 		} else {
2612 			dout(" skipping cap_snap %p follows %lld\n",
2613 			     capsnap, capsnap->follows);
2614 		}
2615 	}
2616 	spin_unlock(&ci->i_ceph_lock);
2617 	if (drop)
2618 		iput(inode);
2619 }
2620 
2621 /*
2622  * Handle TRUNC from MDS, indicating file truncation.
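 * This typically arrives when another client truncates the file; we
 * update the size via ceph_fill_file_size() and, since pages cannot be
 * invalidated from message handler context, queue an async vmtruncate
 * when the pagecache needs trimming.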
2623  *
2624  * caller holds s_mutex.
2625  */
2626 static void handle_cap_trunc(struct inode *inode,
2627 			     struct ceph_mds_caps *trunc,
2628 			     struct ceph_mds_session *session)
2629 	__releases(ci->i_ceph_lock)
2630 {
2631 	struct ceph_inode_info *ci = ceph_inode(inode);
2632 	int mds = session->s_mds;
2633 	int seq = le32_to_cpu(trunc->seq);
2634 	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2635 	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2636 	u64 size = le64_to_cpu(trunc->size);
2637 	int implemented = 0;
2638 	int dirty = __ceph_caps_dirty(ci);
2639 	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
2640 	int queue_trunc = 0;
2641 
2642 	issued |= implemented | dirty;
2643 
2644 	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2645 	     inode, mds, seq, truncate_size, truncate_seq);
2646 	queue_trunc = ceph_fill_file_size(inode, issued,
2647 					  truncate_seq, truncate_size, size);
2648 	spin_unlock(&ci->i_ceph_lock);
2649 
2650 	if (queue_trunc)
2651 		ceph_queue_vmtruncate(inode);
2652 }
2653 
2654 /*
2655  * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
2656  * different one.  If we are the most recent migration we've seen (as
2657  * indicated by mseq), make note of the migrating cap bits for the
2658  * duration (until we see the corresponding IMPORT).
2659  *
2660  * caller holds s_mutex
2661  */
2662 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2663 			      struct ceph_mds_session *session,
2664 			      int *open_target_sessions)
2665 {
2666 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2667 	struct ceph_inode_info *ci = ceph_inode(inode);
2668 	int mds = session->s_mds;
2669 	unsigned mseq = le32_to_cpu(ex->migrate_seq);
2670 	struct ceph_cap *cap = NULL, *t;
2671 	struct rb_node *p;
2672 	int remember = 1;
2673 
2674 	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2675 	     inode, ci, mds, mseq);
2676 
2677 	spin_lock(&ci->i_ceph_lock);
2678 
2679 	/* make sure we haven't seen a higher mseq */
2680 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2681 		t = rb_entry(p, struct ceph_cap, ci_node);
2682 		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2683 			dout(" higher mseq on cap from mds%d\n",
2684 			     t->session->s_mds);
2685 			remember = 0;
2686 		}
2687 		if (t->session->s_mds == mds)
2688 			cap = t;
2689 	}
2690 
2691 	if (cap) {
2692 		if (remember) {
2693 			/* make note */
2694 			ci->i_cap_exporting_mds = mds;
2695 			ci->i_cap_exporting_mseq = mseq;
2696 			ci->i_cap_exporting_issued = cap->issued;
2697 
2698 			/*
2699 			 * make sure we have open sessions with all possible
2700 			 * export targets, so that we get the matching IMPORT
2701 			 */
2702 			*open_target_sessions = 1;
2703 
2704 			/*
2705 			 * we can't flush dirty caps for which we've seen
2706 			 * the EXPORT but not yet the IMPORT
2707 			 */
2708 			spin_lock(&mdsc->cap_dirty_lock);
2709 			if (!list_empty(&ci->i_dirty_item)) {
2710 				dout(" moving %p to cap_dirty_migrating\n",
2711 				     inode);
2712 				list_move(&ci->i_dirty_item,
2713 					  &mdsc->cap_dirty_migrating);
2714 			}
2715 			spin_unlock(&mdsc->cap_dirty_lock);
2716 		}
2717 		__ceph_remove_cap(cap);
2718 	}
2719 	/* else, we already released it */
2720 
2721 	spin_unlock(&ci->i_ceph_lock);
2722 }
2723 
2724 /*
2725  * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
2726  * clean them up.
2727  *
2728  * caller holds s_mutex.
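 *
 * Migration sequence (sketch, mds0 -> mds1):
 *
 *	mds0 EXPORT -> handle_cap_export() remembers the issued bits in
 *	               i_cap_exporting_* and removes the mds0 cap
 *	mds1 IMPORT -> handle_cap_import() clears those temp bits and
 *	               ceph_add_cap() installs the mds1 cap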
2729 */ 2730 static void handle_cap_import(struct ceph_mds_client *mdsc, 2731 struct inode *inode, struct ceph_mds_caps *im, 2732 struct ceph_mds_session *session, 2733 void *snaptrace, int snaptrace_len) 2734 { 2735 struct ceph_inode_info *ci = ceph_inode(inode); 2736 int mds = session->s_mds; 2737 unsigned issued = le32_to_cpu(im->caps); 2738 unsigned wanted = le32_to_cpu(im->wanted); 2739 unsigned seq = le32_to_cpu(im->seq); 2740 unsigned mseq = le32_to_cpu(im->migrate_seq); 2741 u64 realmino = le64_to_cpu(im->realm); 2742 u64 cap_id = le64_to_cpu(im->cap_id); 2743 2744 if (ci->i_cap_exporting_mds >= 0 && 2745 ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) { 2746 dout("handle_cap_import inode %p ci %p mds%d mseq %d" 2747 " - cleared exporting from mds%d\n", 2748 inode, ci, mds, mseq, 2749 ci->i_cap_exporting_mds); 2750 ci->i_cap_exporting_issued = 0; 2751 ci->i_cap_exporting_mseq = 0; 2752 ci->i_cap_exporting_mds = -1; 2753 2754 spin_lock(&mdsc->cap_dirty_lock); 2755 if (!list_empty(&ci->i_dirty_item)) { 2756 dout(" moving %p back to cap_dirty\n", inode); 2757 list_move(&ci->i_dirty_item, &mdsc->cap_dirty); 2758 } 2759 spin_unlock(&mdsc->cap_dirty_lock); 2760 } else { 2761 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n", 2762 inode, ci, mds, mseq); 2763 } 2764 2765 down_write(&mdsc->snap_rwsem); 2766 ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len, 2767 false); 2768 downgrade_write(&mdsc->snap_rwsem); 2769 ceph_add_cap(inode, session, cap_id, -1, 2770 issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH, 2771 NULL /* no caps context */); 2772 kick_flushing_inode_caps(mdsc, session, inode); 2773 up_read(&mdsc->snap_rwsem); 2774 2775 /* make sure we re-request max_size, if necessary */ 2776 spin_lock(&ci->i_ceph_lock); 2777 ci->i_wanted_max_size = 0; /* reset */ 2778 ci->i_requested_max_size = 0; 2779 spin_unlock(&ci->i_ceph_lock); 2780 } 2781 2782 /* 2783 * Handle a caps message from the MDS. 2784 * 2785 * Identify the appropriate session, inode, and call the right handler 2786 * based on the cap op. 
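 *
 * Dispatch (mirrors the switch statements below):
 *
 *	FLUSHSNAP_ACK        -> handle_cap_flushsnap_ack()
 *	EXPORT               -> handle_cap_export()
 *	IMPORT               -> handle_cap_import(), then handle_cap_grant()
 *	GRANT/REVOKE         -> handle_cap_grant()
 *	FLUSH_ACK            -> handle_cap_flush_ack()
 *	TRUNC                -> handle_cap_trunc()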
2787 */ 2788 void ceph_handle_caps(struct ceph_mds_session *session, 2789 struct ceph_msg *msg) 2790 { 2791 struct ceph_mds_client *mdsc = session->s_mdsc; 2792 struct super_block *sb = mdsc->fsc->sb; 2793 struct inode *inode; 2794 struct ceph_inode_info *ci; 2795 struct ceph_cap *cap; 2796 struct ceph_mds_caps *h; 2797 int mds = session->s_mds; 2798 int op; 2799 u32 seq, mseq; 2800 struct ceph_vino vino; 2801 u64 cap_id; 2802 u64 size, max_size; 2803 u64 tid; 2804 void *snaptrace; 2805 size_t snaptrace_len; 2806 void *flock; 2807 u32 flock_len; 2808 int open_target_sessions = 0; 2809 2810 dout("handle_caps from mds%d\n", mds); 2811 2812 /* decode */ 2813 tid = le64_to_cpu(msg->hdr.tid); 2814 if (msg->front.iov_len < sizeof(*h)) 2815 goto bad; 2816 h = msg->front.iov_base; 2817 op = le32_to_cpu(h->op); 2818 vino.ino = le64_to_cpu(h->ino); 2819 vino.snap = CEPH_NOSNAP; 2820 cap_id = le64_to_cpu(h->cap_id); 2821 seq = le32_to_cpu(h->seq); 2822 mseq = le32_to_cpu(h->migrate_seq); 2823 size = le64_to_cpu(h->size); 2824 max_size = le64_to_cpu(h->max_size); 2825 2826 snaptrace = h + 1; 2827 snaptrace_len = le32_to_cpu(h->snap_trace_len); 2828 2829 if (le16_to_cpu(msg->hdr.version) >= 2) { 2830 void *p, *end; 2831 2832 p = snaptrace + snaptrace_len; 2833 end = msg->front.iov_base + msg->front.iov_len; 2834 ceph_decode_32_safe(&p, end, flock_len, bad); 2835 flock = p; 2836 } else { 2837 flock = NULL; 2838 flock_len = 0; 2839 } 2840 2841 mutex_lock(&session->s_mutex); 2842 session->s_seq++; 2843 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, 2844 (unsigned)seq); 2845 2846 if (op == CEPH_CAP_OP_IMPORT) 2847 ceph_add_cap_releases(mdsc, session); 2848 2849 /* lookup ino */ 2850 inode = ceph_find_inode(sb, vino); 2851 ci = ceph_inode(inode); 2852 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, 2853 vino.snap, inode); 2854 if (!inode) { 2855 dout(" i don't have ino %llx\n", vino.ino); 2856 2857 if (op == CEPH_CAP_OP_IMPORT) 2858 __queue_cap_release(session, vino.ino, cap_id, 2859 mseq, seq); 2860 goto flush_cap_releases; 2861 } 2862 2863 /* these will work even if we don't have a cap yet */ 2864 switch (op) { 2865 case CEPH_CAP_OP_FLUSHSNAP_ACK: 2866 handle_cap_flushsnap_ack(inode, tid, h, session); 2867 goto done; 2868 2869 case CEPH_CAP_OP_EXPORT: 2870 handle_cap_export(inode, h, session, &open_target_sessions); 2871 goto done; 2872 2873 case CEPH_CAP_OP_IMPORT: 2874 handle_cap_import(mdsc, inode, h, session, 2875 snaptrace, snaptrace_len); 2876 } 2877 2878 /* the rest require a cap */ 2879 spin_lock(&ci->i_ceph_lock); 2880 cap = __get_cap_for_mds(ceph_inode(inode), mds); 2881 if (!cap) { 2882 dout(" no cap on %p ino %llx.%llx from mds%d\n", 2883 inode, ceph_ino(inode), ceph_snap(inode), mds); 2884 spin_unlock(&ci->i_ceph_lock); 2885 goto flush_cap_releases; 2886 } 2887 2888 /* note that each of these drops i_ceph_lock for us */ 2889 switch (op) { 2890 case CEPH_CAP_OP_REVOKE: 2891 case CEPH_CAP_OP_GRANT: 2892 case CEPH_CAP_OP_IMPORT: 2893 handle_cap_grant(inode, h, session, cap, msg->middle); 2894 goto done_unlocked; 2895 2896 case CEPH_CAP_OP_FLUSH_ACK: 2897 handle_cap_flush_ack(inode, tid, h, session, cap); 2898 break; 2899 2900 case CEPH_CAP_OP_TRUNC: 2901 handle_cap_trunc(inode, h, session); 2902 break; 2903 2904 default: 2905 spin_unlock(&ci->i_ceph_lock); 2906 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, 2907 ceph_cap_op_name(op)); 2908 } 2909 2910 goto done; 2911 2912 flush_cap_releases: 2913 /* 2914 * send any full release message to try 
to move things 2915 * along for the mds (who clearly thinks we still have this 2916 * cap). 2917 */ 2918 ceph_add_cap_releases(mdsc, session); 2919 ceph_send_cap_releases(mdsc, session); 2920 2921 done: 2922 mutex_unlock(&session->s_mutex); 2923 done_unlocked: 2924 if (inode) 2925 iput(inode); 2926 if (open_target_sessions) 2927 ceph_mdsc_open_export_target_sessions(mdsc, session); 2928 return; 2929 2930 bad: 2931 pr_err("ceph_handle_caps: corrupt message\n"); 2932 ceph_msg_dump(msg); 2933 return; 2934 } 2935 2936 /* 2937 * Delayed work handler to process end of delayed cap release LRU list. 2938 */ 2939 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) 2940 { 2941 struct ceph_inode_info *ci; 2942 int flags = CHECK_CAPS_NODELAY; 2943 2944 dout("check_delayed_caps\n"); 2945 while (1) { 2946 spin_lock(&mdsc->cap_delay_lock); 2947 if (list_empty(&mdsc->cap_delay_list)) 2948 break; 2949 ci = list_first_entry(&mdsc->cap_delay_list, 2950 struct ceph_inode_info, 2951 i_cap_delay_list); 2952 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && 2953 time_before(jiffies, ci->i_hold_caps_max)) 2954 break; 2955 list_del_init(&ci->i_cap_delay_list); 2956 spin_unlock(&mdsc->cap_delay_lock); 2957 dout("check_delayed_caps on %p\n", &ci->vfs_inode); 2958 ceph_check_caps(ci, flags, NULL); 2959 } 2960 spin_unlock(&mdsc->cap_delay_lock); 2961 } 2962 2963 /* 2964 * Flush all dirty caps to the mds 2965 */ 2966 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) 2967 { 2968 struct ceph_inode_info *ci; 2969 struct inode *inode; 2970 2971 dout("flush_dirty_caps\n"); 2972 spin_lock(&mdsc->cap_dirty_lock); 2973 while (!list_empty(&mdsc->cap_dirty)) { 2974 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info, 2975 i_dirty_item); 2976 inode = &ci->vfs_inode; 2977 ihold(inode); 2978 dout("flush_dirty_caps %p\n", inode); 2979 spin_unlock(&mdsc->cap_dirty_lock); 2980 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL); 2981 iput(inode); 2982 spin_lock(&mdsc->cap_dirty_lock); 2983 } 2984 spin_unlock(&mdsc->cap_dirty_lock); 2985 dout("flush_dirty_caps done\n"); 2986 } 2987 2988 /* 2989 * Drop open file reference. If we were the last open file, 2990 * we may need to release capabilities to the MDS (or schedule 2991 * their delayed release). 2992 */ 2993 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) 2994 { 2995 struct inode *inode = &ci->vfs_inode; 2996 int last = 0; 2997 2998 spin_lock(&ci->i_ceph_lock); 2999 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode, 3000 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1); 3001 BUG_ON(ci->i_nr_by_mode[fmode] == 0); 3002 if (--ci->i_nr_by_mode[fmode] == 0) 3003 last++; 3004 spin_unlock(&ci->i_ceph_lock); 3005 3006 if (last && ci->i_vino.snap == CEPH_NOSNAP) 3007 ceph_check_caps(ci, 0, NULL); 3008 } 3009 3010 /* 3011 * Helpers for embedding cap and dentry lease releases into mds 3012 * requests. 3013 * 3014 * @force is used by dentry_release (below) to force inclusion of a 3015 * record for the directory inode, even when there aren't any caps to 3016 * drop. 
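 *
 * Caller-side sketch (hypothetical; the real call sites are in the
 * request-encoding path of mds_client.c):
 *
 *	void *p = ...;	// cursor into the request's release area
 *	if (ceph_encode_inode_release(&p, inode, mds,
 *				      CEPH_CAP_FILE_SHARED, 0, 0))
 *		num_releases++;	// one ceph_mds_request_release encoded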
3017  */
3018 int ceph_encode_inode_release(void **p, struct inode *inode,
3019 			      int mds, int drop, int unless, int force)
3020 {
3021 	struct ceph_inode_info *ci = ceph_inode(inode);
3022 	struct ceph_cap *cap;
3023 	struct ceph_mds_request_release *rel = *p;
3024 	int used, dirty;
3025 	int ret = 0;
3026 
3027 	spin_lock(&ci->i_ceph_lock);
3028 	used = __ceph_caps_used(ci);
3029 	dirty = __ceph_caps_dirty(ci);
3030 
3031 	dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3032 	     inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3033 	     ceph_cap_string(unless));
3034 
3035 	/* only drop unused, clean caps */
3036 	drop &= ~(used | dirty);
3037 
3038 	cap = __get_cap_for_mds(ci, mds);
3039 	if (cap && __cap_is_valid(cap)) {
3040 		if (force ||
3041 		    ((cap->issued & drop) &&
3042 		     (cap->issued & unless) == 0)) {
3043 			if ((cap->issued & drop) &&
3044 			    (cap->issued & unless) == 0) {
3045 				dout("encode_inode_release %p cap %p %s -> "
3046 				     "%s\n", inode, cap,
3047 				     ceph_cap_string(cap->issued),
3048 				     ceph_cap_string(cap->issued & ~drop));
3049 				cap->issued &= ~drop;
3050 				cap->implemented &= ~drop;
3051 				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
3052 					int wanted = __ceph_caps_wanted(ci);
3053 					dout(" wanted %s -> %s (act %s)\n",
3054 					     ceph_cap_string(cap->mds_wanted),
3055 					     ceph_cap_string(cap->mds_wanted &
3056 							     ~wanted),
3057 					     ceph_cap_string(wanted));
3058 					cap->mds_wanted &= wanted;
3059 				}
3060 			} else {
3061 				dout("encode_inode_release %p cap %p %s"
3062 				     " (force)\n", inode, cap,
3063 				     ceph_cap_string(cap->issued));
3064 			}
3065 
3066 			rel->ino = cpu_to_le64(ceph_ino(inode));
3067 			rel->cap_id = cpu_to_le64(cap->cap_id);
3068 			rel->seq = cpu_to_le32(cap->seq);
3069 			rel->issue_seq = cpu_to_le32(cap->issue_seq);
3070 			rel->mseq = cpu_to_le32(cap->mseq);
3071 			rel->caps = cpu_to_le32(cap->issued);
3072 			rel->wanted = cpu_to_le32(cap->mds_wanted);
3073 			rel->dname_len = 0;
3074 			rel->dname_seq = 0;
3075 			*p += sizeof(*rel);
3076 			ret = 1;
3077 		} else {
3078 			dout("encode_inode_release %p cap %p %s\n",
3079 			     inode, cap, ceph_cap_string(cap->issued));
3080 		}
3081 	}
3082 	spin_unlock(&ci->i_ceph_lock);
3083 	return ret;
3084 }
3085 
3086 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3087 			       int mds, int drop, int unless)
3088 {
3089 	struct inode *dir = dentry->d_parent->d_inode;
3090 	struct ceph_mds_request_release *rel = *p;
3091 	struct ceph_dentry_info *di = ceph_dentry(dentry);
3092 	int force = 0;
3093 	int ret;
3094 
3095 	/*
3096 	 * force a record for the directory caps if we have a dentry lease.
3097 	 * this is racy (can't take i_ceph_lock and d_lock together), but it
3098 	 * doesn't have to be perfect; the mds will revoke anything we don't
3099 	 * release.
3100 	 */
3101 	spin_lock(&dentry->d_lock);
3102 	if (di->lease_session && di->lease_session->s_mds == mds)
3103 		force = 1;
3104 	spin_unlock(&dentry->d_lock);
3105 
3106 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3107 
3108 	spin_lock(&dentry->d_lock);
3109 	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3110 		dout("encode_dentry_release %p mds%d seq %d\n",
3111 		     dentry, mds, (int)di->lease_seq);
3112 		rel->dname_len = cpu_to_le32(dentry->d_name.len);
3113 		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3114 		*p += dentry->d_name.len;
3115 		rel->dname_seq = cpu_to_le32(di->lease_seq);
3116 		__ceph_mdsc_drop_dentry_lease(dentry);
3117 	}
3118 	spin_unlock(&dentry->d_lock);
3119 	return ret;
3120 }
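
/*
 * Usage sketch (hypothetical; cf. the read/write paths in file.c): a
 * caller takes cap references around I/O so the MDS-granted capabilities
 * are not released mid-operation, then drops them when done:
 *
 *	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
 *			    &got, -1);
 *	if (ret == 0) {
 *		... do the read, honoring 'got' ...
 *		ceph_put_cap_refs(ci, got);
 *	}
 */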