#include <linux/ceph/ceph_debug.h>

#include <linux/sort.h>
#include <linux/slab.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

/*
 * Snapshots in ceph are driven in large part by cooperation from the
 * client.  In contrast to local file systems or file servers that
 * implement snapshots at a single point in the system, ceph's
 * distributed access to storage requires clients to help decide
 * whether a write logically occurs before or after a recently created
 * snapshot.
 *
 * This provides a perfect instantaneous client-wide snapshot.  Between
 * clients, however, snapshots may appear to be applied at slightly
 * different points in time, depending on delays in delivering the
 * snapshot notification.
 *
 * Snapshots are _not_ file system-wide.  Instead, each snapshot
 * applies to some subdirectory and everything nested beneath it.  This
 * effectively divides the hierarchy into multiple "realms," where all
 * of the files contained by each realm share the same set of
 * snapshots.  An individual realm's snap set contains snapshots
 * explicitly created on that realm, as well as any snaps in its
 * parent's snap set _after_ the point at which the parent became its
 * parent (due to, say, a rename).  Similarly, snaps from prior parents
 * are included for the intervals during which they were the parent.
 *
 * The client is spared most of this detail, fortunately... it only
 * needs to maintain a hierarchy of realms reflecting the current
 * parent/child realm relationship, and, for each realm, an explicit
 * list of snaps inherited from prior parents.
 *
 * A snap_realm struct is maintained for every realm containing an
 * inode with an open cap in the system.  (The needed snap realm
 * information is provided by the MDS whenever a cap is issued, i.e.,
 * on open.)  A 'seq' version number is used to ensure that as realm
 * parameters change (new snapshot, new parent, etc.) the client's
 * realm hierarchy is updated.
 *
 * The realm hierarchy drives the generation of a 'snap context' for
 * each realm, which simply lists the resulting set of snaps for the
 * realm.  This is attached to any writes sent to OSDs.
 */
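/*
 * An illustrative example (hypothetical numbers): suppose realm A has
 * snaps {5, 8} and a directory is moved under A at snapid 6, giving it
 * its own realm B with parent_since = 6.  B's snap set then contains
 * B's own snaps, snap 8 from A (taken after B became A's child), and
 * any snaps B inherited from its prior parent, but not snap 5.
 */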
/*
 * Unfortunately error handling is a bit mixed here.  If we get a snap
 * update, but don't have enough memory to update our realm hierarchy,
 * it's not clear what we can do about it (besides complaining to the
 * console).
 */


/*
 * increase ref count for the realm
 *
 * caller must hold snap_rwsem for write.
 */
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("get_realm %p %d -> %d\n", realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
	/*
	 * since we _only_ increment realm refs or empty the empty
	 * list with snap_rwsem held, adjusting the empty list here is
	 * safe.  we do need to protect against concurrent empty list
	 * additions, however.
	 */
	if (atomic_read(&realm->nref) == 0) {
		spin_lock(&mdsc->snap_empty_lock);
		list_del_init(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
	}

	atomic_inc(&realm->nref);
}

static void __insert_snap_realm(struct rb_root *root,
				struct ceph_snap_realm *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_snap_realm *r = NULL;

	while (*p) {
		parent = *p;
		r = rb_entry(parent, struct ceph_snap_realm, node);
		if (new->ino < r->ino)
			p = &(*p)->rb_left;
		else if (new->ino > r->ino)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

/*
 * create and get the realm rooted at @ino and bump its ref count.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *ceph_create_snap_realm(
	struct ceph_mds_client *mdsc,
	u64 ino)
{
	struct ceph_snap_realm *realm;

	realm = kzalloc(sizeof(*realm), GFP_NOFS);
	if (!realm)
		return ERR_PTR(-ENOMEM);

	atomic_set(&realm->nref, 0);    /* tree does not take a ref */
	realm->ino = ino;
	INIT_LIST_HEAD(&realm->children);
	INIT_LIST_HEAD(&realm->child_item);
	INIT_LIST_HEAD(&realm->empty_item);
	INIT_LIST_HEAD(&realm->dirty_item);
	INIT_LIST_HEAD(&realm->inodes_with_caps);
	spin_lock_init(&realm->inodes_with_caps_lock);
	__insert_snap_realm(&mdsc->snap_realms, realm);
	dout("create_snap_realm %llx %p\n", realm->ino, realm);
	return realm;
}

/*
 * lookup the realm rooted at @ino.
 *
 * caller must hold snap_rwsem for write.
 */
struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
					       u64 ino)
{
	struct rb_node *n = mdsc->snap_realms.rb_node;
	struct ceph_snap_realm *r;

	while (n) {
		r = rb_entry(n, struct ceph_snap_realm, node);
		if (ino < r->ino)
			n = n->rb_left;
		else if (ino > r->ino)
			n = n->rb_right;
		else {
			dout("lookup_snap_realm %llx %p\n", r->ino, r);
			return r;
		}
	}
	return NULL;
}

static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm);

/*
 * called with snap_rwsem (write)
 */
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
				 struct ceph_snap_realm *realm)
{
	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);

	rb_erase(&realm->node, &mdsc->snap_realms);

	if (realm->parent) {
		list_del_init(&realm->child_item);
		__put_snap_realm(mdsc, realm->parent);
	}

	kfree(realm->prior_parent_snaps);
	kfree(realm->snaps);
	ceph_put_snap_context(realm->cached_context);
	kfree(realm);
}

/*
 * caller holds snap_rwsem (write)
 */
static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm)
{
	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (atomic_dec_and_test(&realm->nref))
		__destroy_snap_realm(mdsc, realm);
}

/*
 * caller needn't hold any locks
 */
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (!atomic_dec_and_test(&realm->nref))
		return;

	if (down_write_trylock(&mdsc->snap_rwsem)) {
		__destroy_snap_realm(mdsc, realm);
		up_write(&mdsc->snap_rwsem);
	} else {
		spin_lock(&mdsc->snap_empty_lock);
		list_add(&realm->empty_item, &mdsc->snap_empty);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}
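/*
 * Note: realms released by ceph_put_snap_realm() while snap_rwsem could
 * not be taken are parked on mdsc->snap_empty above; they are actually
 * destroyed later, under snap_rwsem (write), by __cleanup_empty_realms()
 * below.
 */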
/*
 * Clean up any realms whose ref counts have dropped to zero.  Note
 * that this does not include realms that were created but not yet
 * used.
 *
 * Called under snap_rwsem (write)
 */
static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	struct ceph_snap_realm *realm;

	spin_lock(&mdsc->snap_empty_lock);
	while (!list_empty(&mdsc->snap_empty)) {
		realm = list_first_entry(&mdsc->snap_empty,
					 struct ceph_snap_realm, empty_item);
		list_del(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
		__destroy_snap_realm(mdsc, realm);
		spin_lock(&mdsc->snap_empty_lock);
	}
	spin_unlock(&mdsc->snap_empty_lock);
}

void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	down_write(&mdsc->snap_rwsem);
	__cleanup_empty_realms(mdsc);
	up_write(&mdsc->snap_rwsem);
}

/*
 * adjust the parent realm of a given @realm.  adjust child list, parent
 * pointers, and ref counts appropriately.
 *
 * return 1 if the parent was changed, 0 if unchanged, <0 on error.
 *
 * caller must hold snap_rwsem for write.
 */
static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
				    struct ceph_snap_realm *realm,
				    u64 parentino)
{
	struct ceph_snap_realm *parent;

	if (realm->parent_ino == parentino)
		return 0;

	parent = ceph_lookup_snap_realm(mdsc, parentino);
	if (!parent) {
		parent = ceph_create_snap_realm(mdsc, parentino);
		if (IS_ERR(parent))
			return PTR_ERR(parent);
	}
	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
	     realm->ino, realm, realm->parent_ino, realm->parent,
	     parentino, parent);
	if (realm->parent) {
		list_del_init(&realm->child_item);
		ceph_put_snap_realm(mdsc, realm->parent);
	}
	realm->parent_ino = parentino;
	realm->parent = parent;
	ceph_get_snap_realm(mdsc, parent);
	list_add(&realm->child_item, &parent->children);
	return 1;
}


static int cmpu64_rev(const void *a, const void *b)
{
	if (*(u64 *)a < *(u64 *)b)
		return 1;
	if (*(u64 *)a > *(u64 *)b)
		return -1;
	return 0;
}
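/*
 * Illustrative example of what build_snap_context() below produces
 * (hypothetical numbers): given a realm with its own snaps {10, 6},
 * prior_parent_snaps {3}, parent_since 6, and a parent context of
 * {12, 8, 2}, the parent contributes only {12, 8} (snaps >=
 * parent_since), and the resulting context is the reverse-sorted
 * vector [12, 10, 8, 6, 3] with seq equal to the larger of the realm's
 * seq and the parent context's seq.
 */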
/*
 * build the snap context for a given realm.
 */
static int build_snap_context(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *parent = realm->parent;
	struct ceph_snap_context *snapc;
	int err = 0;
	u32 num = realm->num_prior_parent_snaps + realm->num_snaps;

	/*
	 * build parent context, if it hasn't been built.
	 * conservatively estimate that all parent snaps might be
	 * included by us.
	 */
	if (parent) {
		if (!parent->cached_context) {
			err = build_snap_context(parent);
			if (err)
				goto fail;
		}
		num += parent->cached_context->num_snaps;
	}

	/* do i actually need to update?  not if my context seq
	   matches realm seq, and my parent's does too.  (this works
	   because rebuild_snap_realms() works _downward_ in the
	   hierarchy after each update.) */
	if (realm->cached_context &&
	    realm->cached_context->seq == realm->seq &&
	    (!parent ||
	     realm->cached_context->seq >= parent->cached_context->seq)) {
		dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
		     " (unchanged)\n",
		     realm->ino, realm, realm->cached_context,
		     realm->cached_context->seq,
		     (unsigned int) realm->cached_context->num_snaps);
		return 0;
	}

	/* alloc new snap context */
	err = -ENOMEM;
	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
		goto fail;
	snapc = ceph_create_snap_context(num, GFP_NOFS);
	if (!snapc)
		goto fail;

	/* build (reverse sorted) snap vector */
	num = 0;
	snapc->seq = realm->seq;
	if (parent) {
		u32 i;

		/* include any of parent's snaps occurring _after_ my
		   parent became my parent */
		for (i = 0; i < parent->cached_context->num_snaps; i++)
			if (parent->cached_context->snaps[i] >=
			    realm->parent_since)
				snapc->snaps[num++] =
					parent->cached_context->snaps[i];
		if (parent->cached_context->seq > snapc->seq)
			snapc->seq = parent->cached_context->seq;
	}
	memcpy(snapc->snaps + num, realm->snaps,
	       sizeof(u64)*realm->num_snaps);
	num += realm->num_snaps;
	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
	       sizeof(u64)*realm->num_prior_parent_snaps);
	num += realm->num_prior_parent_snaps;

	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
	snapc->num_snaps = num;
	dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
	     realm->ino, realm, snapc, snapc->seq,
	     (unsigned int) snapc->num_snaps);

	if (realm->cached_context)
		ceph_put_snap_context(realm->cached_context);
	realm->cached_context = snapc;
	return 0;

fail:
	/*
	 * if we fail, clear old (incorrect) cached_context... hopefully
	 * we'll have better luck building it later
	 */
	if (realm->cached_context) {
		ceph_put_snap_context(realm->cached_context);
		realm->cached_context = NULL;
	}
	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
	       realm, err);
	return err;
}

/*
 * rebuild snap context for the given realm and all of its children.
 */
static void rebuild_snap_realms(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *child;

	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
	build_snap_context(realm);

	list_for_each_entry(child, &realm->children, child_item)
		rebuild_snap_realms(child);
}


/*
 * helper to allocate and decode an array of snapids.  free prior
 * instance, if any.
 */
static int dup_array(u64 **dst, __le64 *src, u32 num)
{
	u32 i;

	kfree(*dst);
	if (num) {
		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
		if (!*dst)
			return -ENOMEM;
		for (i = 0; i < num; i++)
			(*dst)[i] = get_unaligned_le64(src + i);
	} else {
		*dst = NULL;
	}
	return 0;
}
/*
 * When a snapshot is applied, the size/mtime inode metadata is queued
 * in a ceph_cap_snap (one for each snapshot) until writeback
 * completes and the metadata can be flushed back to the MDS.
 *
 * However, if a (sync) write is currently in progress when we apply
 * the snapshot, we have to wait until the write succeeds or fails
 * (and a final size/mtime is known).  In this case the
 * cap_snap->writing = 1, and the cap_snap is said to be "pending."
 * When the write finishes, we __ceph_finish_cap_snap().
 *
 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
 * change).
 */
void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap;
	int used, dirty;

	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
	if (!capsnap) {
		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
		return;
	}

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	/*
	 * If there is a write in progress, treat that as a dirty Fw,
	 * even though it hasn't completed yet; by the time we finish
	 * up this capsnap it will be.
	 */
	if (used & CEPH_CAP_FILE_WR)
		dirty |= CEPH_CAP_FILE_WR;

	if (__ceph_have_pending_cap_snap(ci)) {
		/* there is no point in queuing multiple "pending" cap_snaps,
		   as no new writes are allowed to start when pending, so any
		   writes in progress now were started before the previous
		   cap_snap.  lucky us. */
		dout("queue_cap_snap %p already pending\n", inode);
		kfree(capsnap);
	} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
			    CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
		struct ceph_snap_context *snapc = ci->i_head_snapc;

		/*
		 * if we are a sync write, we may need to go to the snaprealm
		 * to get the current snapc.
		 */
		if (!snapc)
			snapc = ci->i_snap_realm->cached_context;

		dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n",
		     inode, capsnap, snapc, ceph_cap_string(dirty));
		ihold(inode);

		atomic_set(&capsnap->nref, 1);
		capsnap->ci = ci;
		INIT_LIST_HEAD(&capsnap->ci_item);
		INIT_LIST_HEAD(&capsnap->flushing_item);

		capsnap->follows = snapc->seq;
		capsnap->issued = __ceph_caps_issued(ci, NULL);
		capsnap->dirty = dirty;

		capsnap->mode = inode->i_mode;
		capsnap->uid = inode->i_uid;
		capsnap->gid = inode->i_gid;

		if (dirty & CEPH_CAP_XATTR_EXCL) {
			__ceph_build_xattrs_blob(ci);
			capsnap->xattr_blob =
				ceph_buffer_get(ci->i_xattrs.blob);
			capsnap->xattr_version = ci->i_xattrs.version;
		} else {
			capsnap->xattr_blob = NULL;
			capsnap->xattr_version = 0;
		}

		/* dirty page count moved from _head to this cap_snap;
		   all subsequent page dirties occur _after_ this
		   snapshot. */
		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
		ci->i_wrbuffer_ref_head = 0;
		capsnap->context = snapc;
		ci->i_head_snapc =
			ceph_get_snap_context(ci->i_snap_realm->cached_context);
		dout(" new snapc is %p\n", ci->i_head_snapc);
		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);

		if (used & CEPH_CAP_FILE_WR) {
			dout("queue_cap_snap %p cap_snap %p snapc %p"
			     " seq %llu used WR, now pending\n", inode,
			     capsnap, snapc, snapc->seq);
			capsnap->writing = 1;
		} else {
			/* note mtime, size NOW. */
			__ceph_finish_cap_snap(ci, capsnap);
		}
	} else {
		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
		kfree(capsnap);
	}

	spin_unlock(&ci->i_ceph_lock);
}
/*
 * Finalize the size, mtime for a cap_snap... that is, settle on final values
 * to be used for the snapshot, to be flushed back to the mds.
 *
 * If capsnap can now be flushed, add to snap_flush list, and return 1.
 *
 * Caller must hold i_ceph_lock.
 */
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
			   struct ceph_cap_snap *capsnap)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;

	BUG_ON(capsnap->writing);
	capsnap->size = inode->i_size;
	capsnap->mtime = inode->i_mtime;
	capsnap->atime = inode->i_atime;
	capsnap->ctime = inode->i_ctime;
	capsnap->time_warp_seq = ci->i_time_warp_seq;
	if (capsnap->dirty_pages) {
		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
		     "still has %d dirty pages\n", inode, capsnap,
		     capsnap->context, capsnap->context->seq,
		     ceph_cap_string(capsnap->dirty), capsnap->size,
		     capsnap->dirty_pages);
		return 0;
	}
	dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
	     inode, capsnap, capsnap->context,
	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
	     capsnap->size);

	spin_lock(&mdsc->snap_flush_lock);
	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
	spin_unlock(&mdsc->snap_flush_lock);
	return 1;  /* caller may want to ceph_flush_snaps */
}

/*
 * Queue cap_snaps for snap writeback for this realm and its children.
 * Called under snap_rwsem, so realm topology won't change.
 */
static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
{
	struct ceph_inode_info *ci;
	struct inode *lastinode = NULL;
	struct ceph_snap_realm *child;

	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);

	spin_lock(&realm->inodes_with_caps_lock);
	list_for_each_entry(ci, &realm->inodes_with_caps,
			    i_snap_realm_item) {
		struct inode *inode = igrab(&ci->vfs_inode);
		if (!inode)
			continue;
		spin_unlock(&realm->inodes_with_caps_lock);
		if (lastinode)
			iput(lastinode);
		lastinode = inode;
		ceph_queue_cap_snap(ci);
		spin_lock(&realm->inodes_with_caps_lock);
	}
	spin_unlock(&realm->inodes_with_caps_lock);
	if (lastinode)
		iput(lastinode);

	list_for_each_entry(child, &realm->children, child_item) {
		dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
		     realm, realm->ino, child, child->ino);
		list_del_init(&child->dirty_item);
		list_add(&child->dirty_item, &realm->dirty_item);
	}

	list_del_init(&realm->dirty_item);
	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
}
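/*
 * On-wire layout of a single snap trace record, as implied by the
 * decoding in ceph_update_snap_trace() below (records run from the
 * realm in question up to the root):
 *
 *   struct ceph_mds_snap_realm
 *   __le64 snaps[num_snaps]
 *   __le64 prior_parent_snaps[num_prior_parent_snaps]
 */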
/*
 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
 * the snap realm parameters from a given realm and all of its ancestors,
 * up to the root.
 *
 * Caller must hold snap_rwsem for write.
 */
int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
			   void *p, void *e, bool deletion)
{
	struct ceph_mds_snap_realm *ri;    /* encoded */
	__le64 *snaps;                     /* encoded */
	__le64 *prior_parent_snaps;        /* encoded */
	struct ceph_snap_realm *realm;
	int invalidate = 0;
	int err = -ENOMEM;
	LIST_HEAD(dirty_realms);

	dout("update_snap_trace deletion=%d\n", deletion);
more:
	ceph_decode_need(&p, e, sizeof(*ri), bad);
	ri = p;
	p += sizeof(*ri);
	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
	snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
	prior_parent_snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);

	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
	if (!realm) {
		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
		if (IS_ERR(realm)) {
			err = PTR_ERR(realm);
			goto fail;
		}
	}

	/* ensure the parent is correct */
	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
	if (err < 0)
		goto fail;
	invalidate += err;

	if (le64_to_cpu(ri->seq) > realm->seq) {
		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
		/* update realm parameters, snap lists */
		realm->seq = le64_to_cpu(ri->seq);
		realm->created = le64_to_cpu(ri->created);
		realm->parent_since = le64_to_cpu(ri->parent_since);

		realm->num_snaps = le32_to_cpu(ri->num_snaps);
		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
		if (err < 0)
			goto fail;

		realm->num_prior_parent_snaps =
			le32_to_cpu(ri->num_prior_parent_snaps);
		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
				realm->num_prior_parent_snaps);
		if (err < 0)
			goto fail;

		/* queue realm for cap_snap creation */
		list_add(&realm->dirty_item, &dirty_realms);

		invalidate = 1;
	} else if (!realm->cached_context) {
		dout("update_snap_trace %llx %p seq %lld new\n",
		     realm->ino, realm, realm->seq);
		invalidate = 1;
	} else {
		dout("update_snap_trace %llx %p seq %lld unchanged\n",
		     realm->ino, realm, realm->seq);
	}

	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
	     realm, invalidate, p, e);

	if (p < e)
		goto more;

	/* invalidate when we reach the _end_ (root) of the trace */
	if (invalidate)
		rebuild_snap_realms(realm);

	/*
	 * queue cap snaps _after_ we've built the new snap contexts,
	 * so that i_head_snapc can be set appropriately.
	 */
	while (!list_empty(&dirty_realms)) {
		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
					 dirty_item);
		queue_realm_cap_snaps(realm);
	}

	__cleanup_empty_realms(mdsc);
	return 0;

bad:
	err = -EINVAL;
fail:
	pr_err("update_snap_trace error %d\n", err);
	return err;
}
/*
 * Send any cap_snaps that are queued for flush.  Try to carry
 * s_mutex across multiple snap flushes to avoid locking overhead.
 *
 * Caller holds no locks.
 */
static void flush_snaps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;
	struct ceph_mds_session *session = NULL;

	dout("flush_snaps\n");
	spin_lock(&mdsc->snap_flush_lock);
	while (!list_empty(&mdsc->snap_flush_list)) {
		ci = list_first_entry(&mdsc->snap_flush_list,
				struct ceph_inode_info, i_snap_flush_item);
		inode = &ci->vfs_inode;
		ihold(inode);
		spin_unlock(&mdsc->snap_flush_lock);
		spin_lock(&ci->i_ceph_lock);
		__ceph_flush_snaps(ci, &session, 0);
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		spin_lock(&mdsc->snap_flush_lock);
	}
	spin_unlock(&mdsc->snap_flush_lock);

	if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	dout("flush_snaps done\n");
}
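/*
 * On-wire layout of the snap message handled by ceph_handle_snap()
 * below, as implied by the decoding there:
 *
 *   struct ceph_mds_snap_head
 *   __le64 split_inos[num_split_inos]      (SPLIT only)
 *   __le64 split_realms[num_split_realms]  (SPLIT only)
 *   snap trace (see ceph_update_snap_trace)
 */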
/*
 * Handle a snap notification from the MDS.
 *
 * This can take two basic forms: the simplest is just a snap creation
 * or deletion notification on an existing realm.  This should update the
 * realm and its children.
 *
 * The more difficult case is realm creation, due to snap creation at a
 * new point in the file hierarchy, or due to a rename that moves a file or
 * directory into another realm.
 */
void ceph_handle_snap(struct ceph_mds_client *mdsc,
		      struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	int mds = session->s_mds;
	u64 split;
	int op;
	int trace_len;
	struct ceph_snap_realm *realm = NULL;
	void *p = msg->front.iov_base;
	void *e = p + msg->front.iov_len;
	struct ceph_mds_snap_head *h;
	int num_split_inos, num_split_realms;
	__le64 *split_inos = NULL, *split_realms = NULL;
	int i;
	int locked_rwsem = 0;

	/* decode */
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = p;
	op = le32_to_cpu(h->op);
	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
					  * existing realm */
	num_split_inos = le32_to_cpu(h->num_split_inos);
	num_split_realms = le32_to_cpu(h->num_split_realms);
	trace_len = le32_to_cpu(h->trace_len);
	p += sizeof(*h);

	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
	     ceph_snap_op_name(op), split, trace_len);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	mutex_unlock(&session->s_mutex);

	down_write(&mdsc->snap_rwsem);
	locked_rwsem = 1;

	if (op == CEPH_SNAP_OP_SPLIT) {
		struct ceph_mds_snap_realm *ri;

		/*
		 * A "split" breaks part of an existing realm off into
		 * a new realm.  The MDS provides a list of inodes
		 * (with caps) and child realms that belong to the new
		 * child.
		 */
		split_inos = p;
		p += sizeof(u64) * num_split_inos;
		split_realms = p;
		p += sizeof(u64) * num_split_realms;
		ceph_decode_need(&p, e, sizeof(*ri), bad);
		/* we will peek at realm info here, but will _not_
		 * advance p, as the realm update will occur below in
		 * ceph_update_snap_trace. */
		ri = p;

		realm = ceph_lookup_snap_realm(mdsc, split);
		if (!realm) {
			realm = ceph_create_snap_realm(mdsc, split);
			if (IS_ERR(realm))
				goto out;
		}
		ceph_get_snap_realm(mdsc, realm);

		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
		for (i = 0; i < num_split_inos; i++) {
			struct ceph_vino vino = {
				.ino = le64_to_cpu(split_inos[i]),
				.snap = CEPH_NOSNAP,
			};
			struct inode *inode = ceph_find_inode(sb, vino);
			struct ceph_inode_info *ci;
			struct ceph_snap_realm *oldrealm;

			if (!inode)
				continue;
			ci = ceph_inode(inode);

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_snap_realm)
				goto skip_inode;
			/*
			 * If this inode belongs to a realm that was
			 * created after our new realm, we experienced
			 * a race (due to another split notification
			 * arriving from a different MDS).  So skip
			 * this inode.
			 */
			if (ci->i_snap_realm->created >
			    le64_to_cpu(ri->created)) {
				dout(" leaving %p in newer realm %llx %p\n",
				     inode, ci->i_snap_realm->ino,
				     ci->i_snap_realm);
				goto skip_inode;
			}
			dout(" will move %p to split realm %llx %p\n",
			     inode, realm->ino, realm);
			/*
			 * Move the inode to the new realm
			 */
			spin_lock(&realm->inodes_with_caps_lock);
			list_del_init(&ci->i_snap_realm_item);
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			oldrealm = ci->i_snap_realm;
			ci->i_snap_realm = realm;
			spin_unlock(&realm->inodes_with_caps_lock);
			spin_unlock(&ci->i_ceph_lock);

			ceph_get_snap_realm(mdsc, realm);
			ceph_put_snap_realm(mdsc, oldrealm);

			iput(inode);
			continue;

skip_inode:
			spin_unlock(&ci->i_ceph_lock);
			iput(inode);
		}

		/* we may have taken some of the old realm's children. */
		for (i = 0; i < num_split_realms; i++) {
			struct ceph_snap_realm *child =
				ceph_lookup_snap_realm(mdsc,
					   le64_to_cpu(split_realms[i]));
			if (!child)
				continue;
			adjust_snap_realm_parent(mdsc, child, realm->ino);
		}
	}

	/*
	 * update using the provided snap trace.  if we are deleting a
	 * snap, we can avoid queueing cap_snaps.
	 */
	ceph_update_snap_trace(mdsc, p, e,
			       op == CEPH_SNAP_OP_DESTROY);

	if (op == CEPH_SNAP_OP_SPLIT)
		/* we took a reference when we created the realm, above */
		ceph_put_snap_realm(mdsc, realm);

	__cleanup_empty_realms(mdsc);

	up_write(&mdsc->snap_rwsem);

	flush_snaps(mdsc);
	return;

bad:
	pr_err("corrupt snap message from mds%d\n", mds);
	ceph_msg_dump(msg);
out:
	if (locked_rwsem)
		up_write(&mdsc->snap_rwsem);
	return;
}