/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/sort.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"
#include "ocfs2_trace.h"
#include "file.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>

struct ocfs2_cow_context {
        struct inode *inode;
        u32 cow_start;
        u32 cow_len;
        struct ocfs2_extent_tree data_et;
        struct ocfs2_refcount_tree *ref_tree;
        struct buffer_head *ref_root_bh;
        struct ocfs2_alloc_context *meta_ac;
        struct ocfs2_alloc_context *data_ac;
        struct ocfs2_cached_dealloc_ctxt dealloc;
        void *cow_object;
        struct ocfs2_post_refcount *post_refcount;
        int extra_credits;
        int (*get_clusters)(struct ocfs2_cow_context *context,
                            u32 v_cluster, u32 *p_cluster,
                            u32 *num_clusters,
                            unsigned int *extent_flags);
        int (*cow_duplicate_clusters)(handle_t *handle,
                                      struct inode *inode,
                                      u32 cpos, u32 old_cluster,
                                      u32 new_cluster, u32 new_len);
};

static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
        return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}
static int ocfs2_validate_refcount_block(struct super_block *sb,
                                         struct buffer_head *bh)
{
        int rc;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)bh->b_data;

        trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running.  We know any error is
         * local to this block.
         */
        rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
        if (rc) {
                mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
                     (unsigned long long)bh->b_blocknr);
                return rc;
        }

        if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
                rc = ocfs2_error(sb,
                                 "Refcount block #%llu has bad signature %.*s\n",
                                 (unsigned long long)bh->b_blocknr, 7,
                                 rb->rf_signature);
                goto out;
        }

        if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
                rc = ocfs2_error(sb,
                                 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
                                 (unsigned long long)bh->b_blocknr,
                                 (unsigned long long)le64_to_cpu(rb->rf_blkno));
                goto out;
        }

        if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
                rc = ocfs2_error(sb,
                                 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
                                 (unsigned long long)bh->b_blocknr,
                                 le32_to_cpu(rb->rf_fs_generation));
                goto out;
        }
out:
        return rc;
}

static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
                                     u64 rb_blkno,
                                     struct buffer_head **bh)
{
        int rc;
        struct buffer_head *tmp = *bh;

        rc = ocfs2_read_block(ci, rb_blkno, &tmp,
                              ocfs2_validate_refcount_block);

        /* If ocfs2_read_block() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}

static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        return rf->rf_blkno;
}

static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        return rf->rf_sb;
}

static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        mutex_unlock(&rf->rf_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
        .co_owner               = ocfs2_refcount_cache_owner,
        .co_get_super           = ocfs2_refcount_cache_get_super,
        .co_cache_lock          = ocfs2_refcount_cache_lock,
        .co_cache_unlock        = ocfs2_refcount_cache_unlock,
        .co_io_lock             = ocfs2_refcount_cache_io_lock,
        .co_io_unlock           = ocfs2_refcount_cache_io_unlock,
};

static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
        struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
        struct ocfs2_refcount_tree *tree = NULL;

        while (n) {
                tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);

                if (blkno < tree->rf_blkno)
                        n = n->rb_left;
                else if (blkno > tree->rf_blkno)
                        n = n->rb_right;
                else
                        return tree;
        }

        return NULL;
}
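/*
 * A note on the structure above: the per-mount rb-tree
 * (osb->osb_rf_lock_tree) is keyed by the refcount block number
 * (rf_blkno), so lookup, insert and erase are all logarithmic in the
 * number of live refcount trees.  Everything that walks or modifies
 * the rb-tree runs under osb->osb_lock.
 */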
/* osb_lock is already locked. */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
                                       struct ocfs2_refcount_tree *new)
{
        u64 rf_blkno = new->rf_blkno;
        struct rb_node *parent = NULL;
        struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
        struct ocfs2_refcount_tree *tmp;

        while (*p) {
                parent = *p;

                tmp = rb_entry(parent, struct ocfs2_refcount_tree,
                               rf_node);

                if (rf_blkno < tmp->rf_blkno)
                        p = &(*p)->rb_left;
                else if (rf_blkno > tmp->rf_blkno)
                        p = &(*p)->rb_right;
                else {
                        /* This should never happen! */
                        mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
                             (unsigned long long)rf_blkno);
                        BUG();
                }
        }

        rb_link_node(&new->rf_node, parent, p);
        rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}

static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
        ocfs2_metadata_cache_exit(&tree->rf_ci);
        ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
        ocfs2_lock_res_free(&tree->rf_lockres);
        kfree(tree);
}

static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
                                            struct ocfs2_refcount_tree *tree)
{
        rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
        if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
                osb->osb_ref_tree_lru = NULL;
}

static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
                                                struct ocfs2_refcount_tree *tree)
{
        spin_lock(&osb->osb_lock);
        ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
        spin_unlock(&osb->osb_lock);
}

static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
        struct ocfs2_refcount_tree *tree =
                container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

        ocfs2_free_refcount_tree(tree);
}

static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
        kref_get(&tree->rf_getcnt);
}

static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
        kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}

static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
                                               struct super_block *sb)
{
        ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
        mutex_init(&new->rf_io_mutex);
        new->rf_sb = sb;
        spin_lock_init(&new->rf_lock);
}

static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
                                        struct ocfs2_refcount_tree *new,
                                        u64 rf_blkno, u32 generation)
{
        init_rwsem(&new->rf_sem);
        ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
                                     rf_blkno, generation);
}

static struct ocfs2_refcount_tree *
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
        struct ocfs2_refcount_tree *new;

        new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
        if (!new)
                return NULL;

        new->rf_blkno = rf_blkno;
        kref_init(&new->rf_getcnt);
        ocfs2_init_refcount_tree_ci(new, osb->sb);

        return new;
}
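/*
 * ocfs2_get_refcount_tree() below is a classic lookup-or-create: check
 * the one-entry LRU cache and the rb-tree under osb_lock, and if
 * nothing is found, drop the lock, allocate and initialize a candidate
 * tree, then retake the lock and re-check.  If another thread raced us
 * and inserted a tree for the same rf_blkno first, the loser frees its
 * candidate ("new") and uses the winner.
 */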
static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
                                   struct ocfs2_refcount_tree **ret_tree)
{
        int ret = 0;
        struct ocfs2_refcount_tree *tree, *new = NULL;
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_block *ref_rb;

        spin_lock(&osb->osb_lock);
        if (osb->osb_ref_tree_lru &&
            osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
                tree = osb->osb_ref_tree_lru;
        else
                tree = ocfs2_find_refcount_tree(osb, rf_blkno);
        if (tree)
                goto out;

        spin_unlock(&osb->osb_lock);

        new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
        if (!new) {
                ret = -ENOMEM;
                mlog_errno(ret);
                return ret;
        }
        /*
         * We need the generation to create the refcount tree lock and since
         * it isn't changed during the tree modification, we are safe here to
         * read without protection.
         * We also have to purge the cache after we create the lock since the
         * refcount block may contain stale data.  It can only be trusted when
         * we hold the refcount lock.
         */
        ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
        if (ret) {
                mlog_errno(ret);
                ocfs2_metadata_cache_exit(&new->rf_ci);
                kfree(new);
                return ret;
        }

        ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
        ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
                                      new->rf_generation);
        ocfs2_metadata_cache_purge(&new->rf_ci);

        spin_lock(&osb->osb_lock);
        tree = ocfs2_find_refcount_tree(osb, rf_blkno);
        if (tree)
                goto out;

        ocfs2_insert_refcount_tree(osb, new);

        tree = new;
        new = NULL;

out:
        *ret_tree = tree;

        osb->osb_ref_tree_lru = tree;

        spin_unlock(&osb->osb_lock);

        if (new)
                ocfs2_free_refcount_tree(new);

        brelse(ref_root_bh);
        return ret;
}

static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;

        ret = ocfs2_read_inode_block(inode, &di_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        BUG_ON(!ocfs2_is_refcount_inode(inode));

        di = (struct ocfs2_dinode *)di_bh->b_data;
        *ref_blkno = le64_to_cpu(di->i_refcount_loc);
        brelse(di_bh);
out:
        return ret;
}

static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
                                      struct ocfs2_refcount_tree *tree, int rw)
{
        int ret;

        ret = ocfs2_refcount_lock(tree, rw);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        if (rw)
                down_write(&tree->rf_sem);
        else
                down_read(&tree->rf_sem);

out:
        return ret;
}
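/*
 * Note the lock ordering in __ocfs2_lock_refcount_tree(): the
 * cluster-wide refcount lock is always taken before the local rf_sem,
 * and ocfs2_unlock_refcount_tree() releases them in the reverse order.
 */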
/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and read the refcount block.
 * So read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, it will free the
 * old one and re-create it.
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
                             u64 ref_blkno, int rw,
                             struct ocfs2_refcount_tree **ret_tree,
                             struct buffer_head **ref_bh)
{
        int ret, delete_tree = 0;
        struct ocfs2_refcount_tree *tree = NULL;
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_block *rb;

again:
        ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        ocfs2_refcount_tree_get(tree);

        ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
        if (ret) {
                mlog_errno(ret);
                ocfs2_refcount_tree_put(tree);
                goto out;
        }

        ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
                                        &ref_root_bh);
        if (ret) {
                mlog_errno(ret);
                ocfs2_unlock_refcount_tree(osb, tree, rw);
                goto out;
        }

        rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        /*
         * If the refcount block has been freed and re-created, we may need
         * to recreate the refcount tree also.
         *
         * Here we just remove the tree from the rb-tree, and the last
         * kref holder will unlock and delete this refcount_tree.
         * Then we goto "again" and ocfs2_get_refcount_tree will create
         * the new refcount tree for us.
         */
        if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
                if (!tree->rf_removed) {
                        ocfs2_erase_refcount_tree_from_list(osb, tree);
                        tree->rf_removed = 1;
                        delete_tree = 1;
                }

                ocfs2_unlock_refcount_tree(osb, tree, rw);
                /*
                 * We get an extra reference when we create the refcount
                 * tree, so another put will destroy it.
                 */
                if (delete_tree)
                        ocfs2_refcount_tree_put(tree);
                brelse(ref_root_bh);
                ref_root_bh = NULL;
                goto again;
        }

        *ret_tree = tree;
        if (ref_bh) {
                *ref_bh = ref_root_bh;
                ref_root_bh = NULL;
        }
out:
        brelse(ref_root_bh);
        return ret;
}

void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
                                struct ocfs2_refcount_tree *tree, int rw)
{
        if (rw)
                up_write(&tree->rf_sem);
        else
                up_read(&tree->rf_sem);

        ocfs2_refcount_unlock(tree, rw);
        ocfs2_refcount_tree_put(tree);
}
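/*
 * A typical caller sequence (a sketch; this is exactly the pattern
 * ocfs2_set_refcount_tree() below follows):
 *
 *	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1,
 *				       &ref_tree, &ref_root_bh);
 *	if (ret)
 *		return ret;
 *	... start a transaction and modify the tree ...
 *	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 *	brelse(ref_root_bh);
 */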
void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
        struct rb_node *node;
        struct ocfs2_refcount_tree *tree;
        struct rb_root *root = &osb->osb_rf_lock_tree;

        while ((node = rb_last(root)) != NULL) {
                tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);

                trace_ocfs2_purge_refcount_trees(
                                (unsigned long long) tree->rf_blkno);

                rb_erase(&tree->rf_node, root);
                ocfs2_free_refcount_tree(tree);
        }
}

/*
 * Create a refcount tree for an inode.
 * We take for granted that the inode is already locked.
 */
static int ocfs2_create_refcount_tree(struct inode *inode,
                                      struct buffer_head *di_bh)
{
        int ret;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *new_bh = NULL;
        struct ocfs2_refcount_block *rb;
        struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
        u16 suballoc_bit_start;
        u32 num_got;
        u64 suballoc_loc, first_blkno;

        BUG_ON(ocfs2_is_refcount_inode(inode));

        trace_ocfs2_create_refcount_tree(
                (unsigned long long)oi->ip_blkno);

        ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
                                   &suballoc_bit_start, &num_got,
                                   &first_blkno);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
        if (!new_tree) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out_commit;
        }

        new_bh = sb_getblk(inode->i_sb, first_blkno);
        if (!new_bh) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out_commit;
        }
        ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

        ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
                                      OCFS2_JOURNAL_ACCESS_CREATE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /* Initialize ocfs2_refcount_block. */
        rb = (struct ocfs2_refcount_block *)new_bh->b_data;
        memset(rb, 0, inode->i_sb->s_blocksize);
        strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
        rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
        rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
        rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
        rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
        rb->rf_blkno = cpu_to_le64(first_blkno);
        rb->rf_count = cpu_to_le32(1);
        rb->rf_records.rl_count =
                        cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
        spin_lock(&osb->osb_lock);
        rb->rf_generation = osb->s_next_generation++;
        spin_unlock(&osb->osb_lock);

        ocfs2_journal_dirty(handle, new_bh);

        spin_lock(&oi->ip_lock);
        oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
        di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
        di->i_refcount_loc = cpu_to_le64(first_blkno);
        spin_unlock(&oi->ip_lock);

        trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);

        ocfs2_journal_dirty(handle, di_bh);

        /*
         * We have to init the tree lock here since it will use
         * the generation number to create it.
         */
        new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
        ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
                                      new_tree->rf_generation);

        spin_lock(&osb->osb_lock);
        tree = ocfs2_find_refcount_tree(osb, first_blkno);
        /*
         * We've just created a new refcount tree in this block.  If
         * we found a refcount tree on the ocfs2_super, it must be
         * one we just deleted.  We free the old tree before
         * inserting the new tree.
         */
        BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
        if (tree)
                ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
        ocfs2_insert_refcount_tree(osb, new_tree);
        spin_unlock(&osb->osb_lock);
        new_tree = NULL;
        if (tree)
                ocfs2_refcount_tree_put(tree);

out_commit:
        ocfs2_commit_trans(osb, handle);

out:
        if (new_tree) {
                ocfs2_metadata_cache_exit(&new_tree->rf_ci);
                kfree(new_tree);
        }

        brelse(new_bh);
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);

        return ret;
}

static int ocfs2_set_refcount_tree(struct inode *inode,
                                   struct buffer_head *di_bh,
                                   u64 refcount_loc)
{
        int ret;
        handle_t *handle = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_block *rb;
        struct ocfs2_refcount_tree *ref_tree;

        BUG_ON(ocfs2_is_refcount_inode(inode));

        ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
                                       &ref_tree, &ref_root_bh);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        le32_add_cpu(&rb->rf_count, 1);

        ocfs2_journal_dirty(handle, ref_root_bh);

        spin_lock(&oi->ip_lock);
        oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
        di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
        di->i_refcount_loc = cpu_to_le64(refcount_loc);
        spin_unlock(&oi->ip_lock);
        ocfs2_journal_dirty(handle, di_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        brelse(ref_root_bh);

        return ret;
}
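/*
 * rf_count in the root block counts how many inodes share this
 * refcount tree: ocfs2_create_refcount_tree() starts it at 1,
 * ocfs2_set_refcount_tree() bumps it when another inode starts
 * sharing the tree, and ocfs2_remove_refcount_tree() below drops it,
 * freeing the root block once the last user is gone.
 */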
int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
        int ret, delete_tree = 0;
        handle_t *handle = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_refcount_block *rb;
        struct inode *alloc_inode = NULL;
        struct buffer_head *alloc_bh = NULL;
        struct buffer_head *blk_bh = NULL;
        struct ocfs2_refcount_tree *ref_tree;
        int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
        u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
        u16 bit = 0;

        if (!ocfs2_is_refcount_inode(inode))
                return 0;

        BUG_ON(!ref_blkno);
        ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

        /*
         * If we are the last user, we need to free the block.
         * So lock the allocator ahead.
         */
        if (le32_to_cpu(rb->rf_count) == 1) {
                blk = le64_to_cpu(rb->rf_blkno);
                bit = le16_to_cpu(rb->rf_suballoc_bit);
                if (rb->rf_suballoc_loc)
                        bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
                else
                        bg_blkno = ocfs2_which_suballoc_group(blk, bit);

                alloc_inode = ocfs2_get_system_file_inode(osb,
                                        EXTENT_ALLOC_SYSTEM_INODE,
                                        le16_to_cpu(rb->rf_suballoc_slot));
                if (!alloc_inode) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
                        goto out;
                }
                inode_lock(alloc_inode);

                ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
                if (ret) {
                        mlog_errno(ret);
                        goto out_mutex;
                }

                credits += OCFS2_SUBALLOC_FREE;
        }

        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_unlock;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        spin_lock(&oi->ip_lock);
        oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
        di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
        di->i_refcount_loc = 0;
        spin_unlock(&oi->ip_lock);
        ocfs2_journal_dirty(handle, di_bh);

        le32_add_cpu(&rb->rf_count, -1);
        ocfs2_journal_dirty(handle, blk_bh);

        if (!rb->rf_count) {
                delete_tree = 1;
                ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
                ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
                                               alloc_bh, bit, bg_blkno, 1);
                if (ret)
                        mlog_errno(ret);
        }

out_commit:
        ocfs2_commit_trans(osb, handle);
out_unlock:
        if (alloc_inode) {
                ocfs2_inode_unlock(alloc_inode, 1);
                brelse(alloc_bh);
        }
out_mutex:
        if (alloc_inode) {
                inode_unlock(alloc_inode);
                iput(alloc_inode);
        }
out:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        if (delete_tree)
                ocfs2_refcount_tree_put(ref_tree);
        brelse(blk_bh);

        return ret;
}

static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
                                          struct buffer_head *ref_leaf_bh,
                                          u64 cpos, unsigned int len,
                                          struct ocfs2_refcount_rec *ret_rec,
                                          int *index)
{
        int i = 0;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_rec *rec = NULL;

        for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
                rec = &rb->rf_records.rl_recs[i];

                if (le64_to_cpu(rec->r_cpos) +
                    le32_to_cpu(rec->r_clusters) <= cpos)
                        continue;
                else if (le64_to_cpu(rec->r_cpos) > cpos)
                        break;

                /* ok, cpos falls in this rec.  Just return. */
                if (ret_rec)
                        *ret_rec = *rec;
                goto out;
        }

        if (ret_rec) {
                /* We meet with a hole here, so fake the rec. */
                ret_rec->r_cpos = cpu_to_le64(cpos);
                ret_rec->r_refcount = 0;
                if (i < le16_to_cpu(rb->rf_records.rl_used) &&
                    le64_to_cpu(rec->r_cpos) < cpos + len)
                        ret_rec->r_clusters =
                                cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
                else
                        ret_rec->r_clusters = cpu_to_le32(len);
        }

out:
        *index = i;
}
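/*
 * An example of the "fake" record convention used above: if the leaf
 * holds a record at cpos 14 and the caller asks for cpos 10, len 8,
 * ocfs2_find_refcount_rec_in_rl() returns a fake rec with r_cpos = 10,
 * r_clusters = 4 and r_refcount = 0, i.e. it describes the hole up to
 * the start of the next real record.
 */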
/*
 * Try to remove the refcount tree.  The mechanism is:
 * 1) Check whether i_clusters == 0; if not, exit.
 * 2) Check whether we have i_xattr_loc in dinode; if yes, exit.
 * 3) Check whether we have inline xattr stored outside; if yes, exit.
 * 4) Remove the tree.
 */
int ocfs2_try_remove_refcount_tree(struct inode *inode,
                                   struct buffer_head *di_bh)
{
        int ret;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        down_write(&oi->ip_xattr_sem);
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_clusters)
                goto out;

        if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
                goto out;

        if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
            ocfs2_has_inline_xattr_value_outside(inode, di))
                goto out;

        ret = ocfs2_remove_refcount_tree(inode, di_bh);
        if (ret)
                mlog_errno(ret);
out:
        up_write(&oi->ip_alloc_sem);
        up_write(&oi->ip_xattr_sem);
        return 0;
}

/*
 * Find the end range for a leaf refcount block indicated by
 * el->l_recs[index].e_blkno.
 */
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
                                       struct buffer_head *ref_root_bh,
                                       struct ocfs2_extent_block *eb,
                                       struct ocfs2_extent_list *el,
                                       int index, u32 *cpos_end)
{
        int ret, i, subtree_root;
        u32 cpos;
        u64 blkno;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct ocfs2_path *left_path = NULL, *right_path = NULL;
        struct ocfs2_extent_tree et;
        struct ocfs2_extent_list *tmp_el;

        if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
                /*
                 * We have an extent rec after index, so just use the e_cpos
                 * of the next extent rec.
                 */
                *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
                return 0;
        }

        if (!eb || (eb && !eb->h_next_leaf_blk)) {
                /*
                 * We are the last extent rec, so any high cpos should
                 * be stored in this leaf refcount block.
                 */
                *cpos_end = UINT_MAX;
                return 0;
        }

        /*
         * If the extent block isn't the last one, we have to find
         * the subtree root between this extent block and the next
         * leaf extent block and get the corresponding e_cpos from
         * the subroot.  Otherwise we may corrupt the b-tree.
         */
        ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

        left_path = ocfs2_new_path_from_et(&et);
        if (!left_path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
        ret = ocfs2_find_path(ci, left_path, cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        right_path = ocfs2_new_path_from_path(left_path);
        if (!right_path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_find_path(ci, right_path, cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        subtree_root = ocfs2_find_subtree_root(&et, left_path,
                                               right_path);

        tmp_el = left_path->p_node[subtree_root].el;
        blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
        for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
                if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
                        *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
                        break;
                }
        }

        BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));

out:
        ocfs2_free_path(left_path);
        ocfs2_free_path(right_path);
        return ret;
}
/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts at cpos
 *    and ends at a small value between cpos+len and the start of the
 *    next record.  This fake record has r_refcount = 0.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
                                  struct buffer_head *ref_root_bh,
                                  u64 cpos, unsigned int len,
                                  struct ocfs2_refcount_rec *ret_rec,
                                  int *index,
                                  struct buffer_head **ret_bh)
{
        int ret = 0, i, found;
        u32 low_cpos, uninitialized_var(cpos_end);
        struct ocfs2_extent_list *el;
        struct ocfs2_extent_rec *rec = NULL;
        struct ocfs2_extent_block *eb = NULL;
        struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_root_bh->b_data;

        if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
                ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
                                              ret_rec, index);
                *ret_bh = ref_root_bh;
                get_bh(ref_root_bh);
                return 0;
        }

        el = &rb->rf_list;
        low_cpos = cpos & OCFS2_32BIT_POS_MASK;

        if (el->l_tree_depth) {
                ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                eb = (struct ocfs2_extent_block *) eb_bh->b_data;
                el = &eb->h_list;

                if (el->l_tree_depth) {
                        ret = ocfs2_error(sb,
                                          "refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n",
                                          (unsigned long long)ocfs2_metadata_cache_owner(ci),
                                          (unsigned long long)eb_bh->b_blocknr);
                        goto out;
                }
        }

        found = 0;
        for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
                rec = &el->l_recs[i];

                if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
                                                  eb, el, i, &cpos_end);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                if (cpos_end < low_cpos + len)
                        len = cpos_end - low_cpos;
        }

        ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
                                        &ref_leaf_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
                                      ret_rec, index);
        *ret_bh = ref_leaf_bh;
out:
        brelse(eb_bh);
        return ret;
}

enum ocfs2_ref_rec_contig {
        REF_CONTIG_NONE = 0,
        REF_CONTIG_LEFT,
        REF_CONTIG_RIGHT,
        REF_CONTIG_LEFTRIGHT,
};

static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
                            int index)
{
        if ((rb->rf_records.rl_recs[index].r_refcount ==
             rb->rf_records.rl_recs[index + 1].r_refcount) &&
            (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
             le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
             le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
                return REF_CONTIG_RIGHT;

        return REF_CONTIG_NONE;
}
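/*
 * Two records are considered contiguous when they carry the same
 * r_refcount and the first ends exactly where the second begins.
 * ocfs2_refcount_rec_contig() below reports whether a record can merge
 * with its left neighbour, its right neighbour, or both, e.g.
 * [0,4) x2, [4,6) x2, [6,9) x2 merges down to a single [0,9) x2.
 */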
static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
                          int index)
{
        enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

        if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
                ret = ocfs2_refcount_rec_adjacent(rb, index);

        if (index > 0) {
                enum ocfs2_ref_rec_contig tmp;

                tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);

                if (tmp == REF_CONTIG_RIGHT) {
                        if (ret == REF_CONTIG_RIGHT)
                                ret = REF_CONTIG_LEFTRIGHT;
                        else
                                ret = REF_CONTIG_LEFT;
                }
        }

        return ret;
}

static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
                                           int index)
{
        BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
               rb->rf_records.rl_recs[index+1].r_refcount);

        le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
                     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

        if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
                memmove(&rb->rf_records.rl_recs[index + 1],
                        &rb->rf_records.rl_recs[index + 2],
                        sizeof(struct ocfs2_refcount_rec) *
                        (le16_to_cpu(rb->rf_records.rl_used) - index - 2));

        memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
               0, sizeof(struct ocfs2_refcount_rec));
        le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
 */
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
                                     int index)
{
        enum ocfs2_ref_rec_contig contig =
                                ocfs2_refcount_rec_contig(rb, index);

        if (contig == REF_CONTIG_NONE)
                return;

        if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
                BUG_ON(index == 0);
                index--;
        }

        ocfs2_rotate_refcount_rec_left(rb, index);

        if (contig == REF_CONTIG_LEFTRIGHT)
                ocfs2_rotate_refcount_rec_left(rb, index);
}

/*
 * Change the refcount indexed by "index" in ref_bh.
 * If refcount reaches 0, remove it.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
                                     struct ocfs2_caching_info *ci,
                                     struct buffer_head *ref_leaf_bh,
                                     int index, int merge, int change)
{
        int ret;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_list *rl = &rb->rf_records;
        struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

        ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        trace_ocfs2_change_refcount_rec(
                (unsigned long long)ocfs2_metadata_cache_owner(ci),
                index, le32_to_cpu(rec->r_refcount), change);
        le32_add_cpu(&rec->r_refcount, change);

        if (!rec->r_refcount) {
                if (index != le16_to_cpu(rl->rl_used) - 1) {
                        memmove(rec, rec + 1,
                                (le16_to_cpu(rl->rl_used) - index - 1) *
                                sizeof(struct ocfs2_refcount_rec));
                        memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
                               0, sizeof(struct ocfs2_refcount_rec));
                }

                le16_add_cpu(&rl->rl_used, -1);
        } else if (merge)
                ocfs2_refcount_rec_merge(rb, index);

        ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
        return ret;
}
static int ocfs2_expand_inline_ref_root(handle_t *handle,
                                        struct ocfs2_caching_info *ci,
                                        struct buffer_head *ref_root_bh,
                                        struct buffer_head **ref_leaf_bh,
                                        struct ocfs2_alloc_context *meta_ac)
{
        int ret;
        u16 suballoc_bit_start;
        u32 num_got;
        u64 suballoc_loc, blkno;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct buffer_head *new_bh = NULL;
        struct ocfs2_refcount_block *new_rb;
        struct ocfs2_refcount_block *root_rb =
                (struct ocfs2_refcount_block *)ref_root_bh->b_data;

        ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
                                   &suballoc_bit_start, &num_got,
                                   &blkno);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        new_bh = sb_getblk(sb, blkno);
        if (new_bh == NULL) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }
        ocfs2_set_new_buffer_uptodate(ci, new_bh);

        ret = ocfs2_journal_access_rb(handle, ci, new_bh,
                                      OCFS2_JOURNAL_ACCESS_CREATE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * Initialize ocfs2_refcount_block.
         * It should contain the same information as the old root,
         * so just memcpy it and change the corresponding fields.
         */
        memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);

        new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
        new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
        new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
        new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
        new_rb->rf_blkno = cpu_to_le64(blkno);
        new_rb->rf_cpos = cpu_to_le32(0);
        new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
        new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
        ocfs2_journal_dirty(handle, new_bh);

        /* Now change the root. */
        memset(&root_rb->rf_list, 0, sb->s_blocksize -
               offsetof(struct ocfs2_refcount_block, rf_list));
        root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
        root_rb->rf_clusters = cpu_to_le32(1);
        root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
        root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
        root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
        root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

        ocfs2_journal_dirty(handle, ref_root_bh);

        trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
                le16_to_cpu(new_rb->rf_records.rl_used));

        *ref_leaf_bh = new_bh;
        new_bh = NULL;
out:
        brelse(new_bh);
        return ret;
}

static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
                                           struct ocfs2_refcount_rec *next)
{
        if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
            ocfs2_get_ref_rec_low_cpos(next))
                return 1;

        return 0;
}

static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
        const struct ocfs2_refcount_rec *l = a, *r = b;
        u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
        u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);

        if (l_cpos > r_cpos)
                return 1;
        if (l_cpos < r_cpos)
                return -1;
        return 0;
}

static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
        const struct ocfs2_refcount_rec *l = a, *r = b;
        u64 l_cpos = le64_to_cpu(l->r_cpos);
        u64 r_cpos = le64_to_cpu(r->r_cpos);

        if (l_cpos > r_cpos)
                return 1;
        if (l_cpos < r_cpos)
                return -1;
        return 0;
}

static void swap_refcount_rec(void *a, void *b, int size)
{
        struct ocfs2_refcount_rec *l = a, *r = b;

        swap(*l, *r);
}
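/*
 * Note that leaf records live in two orderings: the canonical order of
 * a record list is by full 64-bit r_cpos (cmp_refcount_rec_by_cpos),
 * while splitting a leaf temporarily re-sorts by the low 32 bits
 * (cmp_refcount_rec_by_low_cpos), because only the low 32 bits are
 * visible as e_cpos in the extent b-tree above the leaves.
 */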
/*
 * The refcount records are ordered by their 64-bit cpos, but we will
 * use the low 32 bits as the e_cpos in the b-tree, so we need to make
 * sure that a split position doesn't intersect with its neighbours.
 *
 * Note: the record list is already sorted by its low 32-bit cpos,
 * so just try the middle position first, and exit as soon as we find
 * a good one.
 */
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
                                         u32 *split_pos, int *split_index)
{
        int num_used = le16_to_cpu(rl->rl_used);
        int delta, middle = num_used / 2;

        for (delta = 0; delta < middle; delta++) {
                /* Let's check delta earlier than middle */
                if (ocfs2_refcount_rec_no_intersect(
                                        &rl->rl_recs[middle - delta - 1],
                                        &rl->rl_recs[middle - delta])) {
                        *split_index = middle - delta;
                        break;
                }

                /* For even counts, don't walk off the end */
                if ((middle + delta + 1) == num_used)
                        continue;

                /* Now try delta past middle */
                if (ocfs2_refcount_rec_no_intersect(
                                        &rl->rl_recs[middle + delta],
                                        &rl->rl_recs[middle + delta + 1])) {
                        *split_index = middle + delta + 1;
                        break;
                }
        }

        if (delta >= middle)
                return -ENOSPC;

        *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
        return 0;
}

static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
                                            struct buffer_head *new_bh,
                                            u32 *split_cpos)
{
        int split_index = 0, num_moved, ret;
        u32 cpos = 0;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_list *rl = &rb->rf_records;
        struct ocfs2_refcount_block *new_rb =
                (struct ocfs2_refcount_block *)new_bh->b_data;
        struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;

        trace_ocfs2_divide_leaf_refcount_block(
                (unsigned long long)ref_leaf_bh->b_blocknr,
                le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));

        /*
         * XXX: Improvement later.
         * If we know all the high 32-bit cpos are the same, no need to sort.
         *
         * In order to make the whole process safe, we do:
         * 1. sort the entries by their low 32-bit cpos first so that we can
         *    find the split cpos easily.
         * 2. call ocfs2_insert_extent to insert the new refcount block.
         * 3. move the refcount rec to the new block.
         * 4. sort the entries by their 64-bit cpos.
         * 5. dirty the new_rb and rb.
         */
        sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
             sizeof(struct ocfs2_refcount_rec),
             cmp_refcount_rec_by_low_cpos, swap_refcount_rec);

        ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        new_rb->rf_cpos = cpu_to_le32(cpos);

        /* Move refcount records starting from split_index to the new block. */
        num_moved = le16_to_cpu(rl->rl_used) - split_index;
        memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
               num_moved * sizeof(struct ocfs2_refcount_rec));

        /* OK, remove the entries we just moved over to the other block. */
        memset(&rl->rl_recs[split_index], 0,
               num_moved * sizeof(struct ocfs2_refcount_rec));

        /* Change old and new rl_used accordingly. */
        le16_add_cpu(&rl->rl_used, -num_moved);
        new_rl->rl_used = cpu_to_le16(num_moved);

        sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
             sizeof(struct ocfs2_refcount_rec),
             cmp_refcount_rec_by_cpos, swap_refcount_rec);

        sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
             sizeof(struct ocfs2_refcount_rec),
             cmp_refcount_rec_by_cpos, swap_refcount_rec);

        *split_cpos = cpos;
        return 0;
}
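/*
 * A small example of the division above: with rl_used = 4 and a
 * non-intersecting boundary found at split_index = 2, records 2..3 are
 * copied into the new leaf, rl_used becomes 2 in the old leaf and 2 in
 * the new one, and the new leaf's rf_cpos is the low 32-bit cpos of
 * the first record that moved.
 */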
static int ocfs2_new_leaf_refcount_block(handle_t *handle,
                                         struct ocfs2_caching_info *ci,
                                         struct buffer_head *ref_root_bh,
                                         struct buffer_head *ref_leaf_bh,
                                         struct ocfs2_alloc_context *meta_ac)
{
        int ret;
        u16 suballoc_bit_start;
        u32 num_got, new_cpos;
        u64 suballoc_loc, blkno;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct ocfs2_refcount_block *root_rb =
                (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        struct buffer_head *new_bh = NULL;
        struct ocfs2_refcount_block *new_rb;
        struct ocfs2_extent_tree ref_et;

        BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));

        ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
                                   &suballoc_bit_start, &num_got,
                                   &blkno);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        new_bh = sb_getblk(sb, blkno);
        if (new_bh == NULL) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }
        ocfs2_set_new_buffer_uptodate(ci, new_bh);

        ret = ocfs2_journal_access_rb(handle, ci, new_bh,
                                      OCFS2_JOURNAL_ACCESS_CREATE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /* Initialize ocfs2_refcount_block. */
        new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
        memset(new_rb, 0, sb->s_blocksize);
        strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
        new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
        new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
        new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
        new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
        new_rb->rf_blkno = cpu_to_le64(blkno);
        new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
        new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
        new_rb->rf_records.rl_count =
                                cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
        new_rb->rf_generation = root_rb->rf_generation;

        ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ocfs2_journal_dirty(handle, ref_leaf_bh);
        ocfs2_journal_dirty(handle, new_bh);

        ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);

        trace_ocfs2_new_leaf_refcount_block(
                        (unsigned long long)new_bh->b_blocknr, new_cpos);

        /* Insert the new leaf block with the specific offset cpos. */
        ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
                                  1, 0, meta_ac);
        if (ret)
                mlog_errno(ret);

out:
        brelse(new_bh);
        return ret;
}
static int ocfs2_expand_refcount_tree(handle_t *handle,
                                      struct ocfs2_caching_info *ci,
                                      struct buffer_head *ref_root_bh,
                                      struct buffer_head *ref_leaf_bh,
                                      struct ocfs2_alloc_context *meta_ac)
{
        int ret;
        struct buffer_head *expand_bh = NULL;

        if (ref_root_bh == ref_leaf_bh) {
                /*
                 * the old root bh hasn't been expanded to a b-tree,
                 * so expand it first.
                 */
                ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
                                                   &expand_bh, meta_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        } else {
                expand_bh = ref_leaf_bh;
                get_bh(expand_bh);
        }

        /* Now add a new refcount block into the tree. */
        ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
                                            expand_bh, meta_ac);
        if (ret)
                mlog_errno(ret);
out:
        brelse(expand_bh);
        return ret;
}

/*
 * Adjust the extent rec in the b-tree representing ref_leaf_bh.
 *
 * Only called when we have inserted a new refcount rec at index 0,
 * which means ocfs2_extent_rec.e_cpos may need some change.
 */
static int ocfs2_adjust_refcount_rec(handle_t *handle,
                                     struct ocfs2_caching_info *ci,
                                     struct buffer_head *ref_root_bh,
                                     struct buffer_head *ref_leaf_bh,
                                     struct ocfs2_refcount_rec *rec)
{
        int ret = 0, i;
        u32 new_cpos, old_cpos;
        struct ocfs2_path *path = NULL;
        struct ocfs2_extent_tree et;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        struct ocfs2_extent_list *el;

        if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
                goto out;

        rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        old_cpos = le32_to_cpu(rb->rf_cpos);
        new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
        if (old_cpos <= new_cpos)
                goto out;

        ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

        path = ocfs2_new_path_from_et(&et);
        if (!path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_find_path(ci, path, old_cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * 2 more credits: one for the leaf refcount block, one for
         * the extent block that contains the extent rec.
         */
        ret = ocfs2_extend_trans(handle, 2);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        /* change the leaf extent block first. */
        el = path_leaf_el(path);

        for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
                if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
                        break;

        BUG_ON(i == le16_to_cpu(el->l_next_free_rec));

        el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);

        /* change the r_cpos in the leaf block. */
        rb->rf_cpos = cpu_to_le32(new_cpos);

        ocfs2_journal_dirty(handle, path_leaf_bh(path));
        ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
        ocfs2_free_path(path);
        return ret;
}
static int ocfs2_insert_refcount_rec(handle_t *handle,
                                     struct ocfs2_caching_info *ci,
                                     struct buffer_head *ref_root_bh,
                                     struct buffer_head *ref_leaf_bh,
                                     struct ocfs2_refcount_rec *rec,
                                     int index, int merge,
                                     struct ocfs2_alloc_context *meta_ac)
{
        int ret;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_list *rf_list = &rb->rf_records;
        struct buffer_head *new_bh = NULL;

        BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

        if (rf_list->rl_used == rf_list->rl_count) {
                u64 cpos = le64_to_cpu(rec->r_cpos);
                u32 len = le32_to_cpu(rec->r_clusters);

                ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
                                                 ref_leaf_bh, meta_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
                                             cpos, len, NULL, &index,
                                             &new_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                ref_leaf_bh = new_bh;
                rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
                rf_list = &rb->rf_records;
        }

        ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        if (index < le16_to_cpu(rf_list->rl_used))
                memmove(&rf_list->rl_recs[index + 1],
                        &rf_list->rl_recs[index],
                        (le16_to_cpu(rf_list->rl_used) - index) *
                         sizeof(struct ocfs2_refcount_rec));

        trace_ocfs2_insert_refcount_rec(
                (unsigned long long)ref_leaf_bh->b_blocknr, index,
                (unsigned long long)le64_to_cpu(rec->r_cpos),
                le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));

        rf_list->rl_recs[index] = *rec;

        le16_add_cpu(&rf_list->rl_used, 1);

        if (merge)
                ocfs2_refcount_rec_merge(rb, index);

        ocfs2_journal_dirty(handle, ref_leaf_bh);

        if (index == 0) {
                ret = ocfs2_adjust_refcount_rec(handle, ci,
                                                ref_root_bh,
                                                ref_leaf_bh, rec);
                if (ret)
                        mlog_errno(ret);
        }
out:
        brelse(new_bh);
        return ret;
}
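/*
 * The index == 0 case above matters because a leaf is addressed from
 * the extent b-tree by its lowest cpos: if a leaf used to start at
 * cpos 5 and we insert a record at cpos 3, both the leaf's rf_cpos
 * and the matching e_cpos in the b-tree must drop to 3, which is what
 * ocfs2_adjust_refcount_rec() does.
 */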
/*
 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simpler than our b-tree code.
 * split_rec is the new refcount rec we want to insert.
 * If split_rec->r_refcount > 0, we are changing the refcount (in case
 * we increase a refcount or decrease a refcount to non-zero).
 * If split_rec->r_refcount == 0, we are punching a hole in the current
 * refcount rec (in case we decrease a refcount to zero).
 */
static int ocfs2_split_refcount_rec(handle_t *handle,
                                    struct ocfs2_caching_info *ci,
                                    struct buffer_head *ref_root_bh,
                                    struct buffer_head *ref_leaf_bh,
                                    struct ocfs2_refcount_rec *split_rec,
                                    int index, int merge,
                                    struct ocfs2_alloc_context *meta_ac,
                                    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
        int ret, recs_need;
        u32 len;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_list *rf_list = &rb->rf_records;
        struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
        struct ocfs2_refcount_rec *tail_rec = NULL;
        struct buffer_head *new_bh = NULL;

        BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

        trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
                le32_to_cpu(orig_rec->r_clusters),
                le32_to_cpu(orig_rec->r_refcount),
                le64_to_cpu(split_rec->r_cpos),
                le32_to_cpu(split_rec->r_clusters),
                le32_to_cpu(split_rec->r_refcount));

        /*
         * If we just need to split the header or tail clusters,
         * no more recs are needed; just splitting is OK.
         * Otherwise we need at least one new rec.
         */
        if (!split_rec->r_refcount &&
            (split_rec->r_cpos == orig_rec->r_cpos ||
             le64_to_cpu(split_rec->r_cpos) +
             le32_to_cpu(split_rec->r_clusters) ==
             le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
                recs_need = 0;
        else
                recs_need = 1;

        /*
         * We need one more rec if we split in the middle and the new rec
         * has some refcount in it.
         */
        if (split_rec->r_refcount &&
            (split_rec->r_cpos != orig_rec->r_cpos &&
             le64_to_cpu(split_rec->r_cpos) +
             le32_to_cpu(split_rec->r_clusters) !=
             le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
                recs_need++;

        /* If the leaf block doesn't have enough records, expand it. */
        if (le16_to_cpu(rf_list->rl_used) + recs_need >
            le16_to_cpu(rf_list->rl_count)) {
                struct ocfs2_refcount_rec tmp_rec;
                u64 cpos = le64_to_cpu(orig_rec->r_cpos);
                len = le32_to_cpu(orig_rec->r_clusters);
                ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
                                                 ref_leaf_bh, meta_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * We have to re-get it since now cpos may be moved to
                 * another leaf block.
                 */
                ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
                                             cpos, len, &tmp_rec, &index,
                                             &new_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                ref_leaf_bh = new_bh;
                rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
                rf_list = &rb->rf_records;
                orig_rec = &rf_list->rl_recs[index];
        }

        ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * We have calculated out how many new records we need and store
         * them in recs_need, so spare enough space first by moving the
         * records after "index" to the end.
         */
        if (index != le16_to_cpu(rf_list->rl_used) - 1)
                memmove(&rf_list->rl_recs[index + 1 + recs_need],
                        &rf_list->rl_recs[index + 1],
                        (le16_to_cpu(rf_list->rl_used) - index - 1) *
                         sizeof(struct ocfs2_refcount_rec));

        len = (le64_to_cpu(orig_rec->r_cpos) +
              le32_to_cpu(orig_rec->r_clusters)) -
              (le64_to_cpu(split_rec->r_cpos) +
              le32_to_cpu(split_rec->r_clusters));

        /*
         * If we have "len", then we will split in the tail and move it
         * to the end of the space we have just spared.
         */
        if (len) {
                tail_rec = &rf_list->rl_recs[index + recs_need];

                memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
                le64_add_cpu(&tail_rec->r_cpos,
                             le32_to_cpu(tail_rec->r_clusters) - len);
                tail_rec->r_clusters = cpu_to_le32(len);
        }

        /*
         * If the split pos isn't the same as the original one, we need to
         * split in the head.
         *
         * Note: We have the chance that split_rec.r_refcount = 0,
         * recs_need = 0 and len > 0, which means we just cut the head from
         * the orig_rec and in that case we have done some modification in
         * orig_rec above, so the check for r_cpos is faked.
         */
        if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
                len = le64_to_cpu(split_rec->r_cpos) -
                      le64_to_cpu(orig_rec->r_cpos);
                orig_rec->r_clusters = cpu_to_le32(len);
                index++;
        }

        le16_add_cpu(&rf_list->rl_used, recs_need);

        if (split_rec->r_refcount) {
                rf_list->rl_recs[index] = *split_rec;
                trace_ocfs2_split_refcount_rec_insert(
                        (unsigned long long)ref_leaf_bh->b_blocknr, index,
                        (unsigned long long)le64_to_cpu(split_rec->r_cpos),
                        le32_to_cpu(split_rec->r_clusters),
                        le32_to_cpu(split_rec->r_refcount));

                if (merge)
                        ocfs2_refcount_rec_merge(rb, index);
        }

        ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
        brelse(new_bh);
        return ret;
}
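/*
 * A worked example of the split above: orig_rec covers [0, 10) with
 * r_refcount = 1 and split_rec is [3, 5) with r_refcount = 2.  The
 * split is strictly interior and split_rec->r_refcount != 0, so
 * recs_need = 2: the tail [5, 10) x1 is copied out first, the head is
 * truncated to [0, 3) x1, and [3, 5) x2 is written at the index in
 * between, growing rl_used from 1 to 3.
 */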
2022 */ 2023 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && 2024 set_len <= len) { 2025 trace_ocfs2_increase_refcount_change( 2026 (unsigned long long)cpos, set_len, 2027 le32_to_cpu(rec.r_refcount)); 2028 ret = ocfs2_change_refcount_rec(handle, ci, 2029 ref_leaf_bh, index, 2030 merge, 1); 2031 if (ret) { 2032 mlog_errno(ret); 2033 goto out; 2034 } 2035 } else if (!rec.r_refcount) { 2036 rec.r_refcount = cpu_to_le32(1); 2037 2038 trace_ocfs2_increase_refcount_insert( 2039 (unsigned long long)le64_to_cpu(rec.r_cpos), 2040 set_len); 2041 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, 2042 ref_leaf_bh, 2043 &rec, index, 2044 merge, meta_ac); 2045 if (ret) { 2046 mlog_errno(ret); 2047 goto out; 2048 } 2049 } else { 2050 set_len = min((u64)(cpos + len), 2051 le64_to_cpu(rec.r_cpos) + set_len) - cpos; 2052 rec.r_cpos = cpu_to_le64(cpos); 2053 rec.r_clusters = cpu_to_le32(set_len); 2054 le32_add_cpu(&rec.r_refcount, 1); 2055 2056 trace_ocfs2_increase_refcount_split( 2057 (unsigned long long)le64_to_cpu(rec.r_cpos), 2058 set_len, le32_to_cpu(rec.r_refcount)); 2059 ret = ocfs2_split_refcount_rec(handle, ci, 2060 ref_root_bh, ref_leaf_bh, 2061 &rec, index, merge, 2062 meta_ac, dealloc); 2063 if (ret) { 2064 mlog_errno(ret); 2065 goto out; 2066 } 2067 } 2068 2069 cpos += set_len; 2070 len -= set_len; 2071 brelse(ref_leaf_bh); 2072 ref_leaf_bh = NULL; 2073 } 2074 2075 out: 2076 brelse(ref_leaf_bh); 2077 return ret; 2078 } 2079 2080 static int ocfs2_remove_refcount_extent(handle_t *handle, 2081 struct ocfs2_caching_info *ci, 2082 struct buffer_head *ref_root_bh, 2083 struct buffer_head *ref_leaf_bh, 2084 struct ocfs2_alloc_context *meta_ac, 2085 struct ocfs2_cached_dealloc_ctxt *dealloc) 2086 { 2087 int ret; 2088 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2089 struct ocfs2_refcount_block *rb = 2090 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2091 struct ocfs2_extent_tree et; 2092 2093 BUG_ON(rb->rf_records.rl_used); 2094 2095 trace_ocfs2_remove_refcount_extent( 2096 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2097 (unsigned long long)ref_leaf_bh->b_blocknr, 2098 le32_to_cpu(rb->rf_cpos)); 2099 2100 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2101 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), 2102 1, meta_ac, dealloc); 2103 if (ret) { 2104 mlog_errno(ret); 2105 goto out; 2106 } 2107 2108 ocfs2_remove_from_cache(ci, ref_leaf_bh); 2109 2110 /* 2111 * Add the freed block to the dealloc context so that it will be freed 2112 * when we run the dealloc. 2113 */ 2114 ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE, 2115 le16_to_cpu(rb->rf_suballoc_slot), 2116 le64_to_cpu(rb->rf_suballoc_loc), 2117 le64_to_cpu(rb->rf_blkno), 2118 le16_to_cpu(rb->rf_suballoc_bit)); 2119 if (ret) { 2120 mlog_errno(ret); 2121 goto out; 2122 } 2123 2124 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, 2125 OCFS2_JOURNAL_ACCESS_WRITE); 2126 if (ret) { 2127 mlog_errno(ret); 2128 goto out; 2129 } 2130 2131 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 2132 2133 le32_add_cpu(&rb->rf_clusters, -1); 2134 2135 /* 2136 * Check whether we need to restore the root refcount block if 2137 * there is no leaf extent block at all.
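 * That is, once the last leaf block is gone, the tree shrinks back to an inline record list embedded in the root block, which is what the reset below re-creates.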
2138 */ 2139 if (!rb->rf_list.l_next_free_rec) { 2140 BUG_ON(rb->rf_clusters); 2141 2142 trace_ocfs2_restore_refcount_block( 2143 (unsigned long long)ref_root_bh->b_blocknr); 2144 2145 rb->rf_flags = 0; 2146 rb->rf_parent = 0; 2147 rb->rf_cpos = 0; 2148 memset(&rb->rf_records, 0, sb->s_blocksize - 2149 offsetof(struct ocfs2_refcount_block, rf_records)); 2150 rb->rf_records.rl_count = 2151 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); 2152 } 2153 2154 ocfs2_journal_dirty(handle, ref_root_bh); 2155 2156 out: 2157 return ret; 2158 } 2159 2160 int ocfs2_increase_refcount(handle_t *handle, 2161 struct ocfs2_caching_info *ci, 2162 struct buffer_head *ref_root_bh, 2163 u64 cpos, u32 len, 2164 struct ocfs2_alloc_context *meta_ac, 2165 struct ocfs2_cached_dealloc_ctxt *dealloc) 2166 { 2167 return __ocfs2_increase_refcount(handle, ci, ref_root_bh, 2168 cpos, len, 1, 2169 meta_ac, dealloc); 2170 } 2171 2172 static int ocfs2_decrease_refcount_rec(handle_t *handle, 2173 struct ocfs2_caching_info *ci, 2174 struct buffer_head *ref_root_bh, 2175 struct buffer_head *ref_leaf_bh, 2176 int index, u64 cpos, unsigned int len, 2177 struct ocfs2_alloc_context *meta_ac, 2178 struct ocfs2_cached_dealloc_ctxt *dealloc) 2179 { 2180 int ret; 2181 struct ocfs2_refcount_block *rb = 2182 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2183 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index]; 2184 2185 BUG_ON(cpos < le64_to_cpu(rec->r_cpos)); 2186 BUG_ON(cpos + len > 2187 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); 2188 2189 trace_ocfs2_decrease_refcount_rec( 2190 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2191 (unsigned long long)cpos, len); 2192 2193 if (cpos == le64_to_cpu(rec->r_cpos) && 2194 len == le32_to_cpu(rec->r_clusters)) 2195 ret = ocfs2_change_refcount_rec(handle, ci, 2196 ref_leaf_bh, index, 1, -1); 2197 else { 2198 struct ocfs2_refcount_rec split = *rec; 2199 split.r_cpos = cpu_to_le64(cpos); 2200 split.r_clusters = cpu_to_le32(len); 2201 2202 le32_add_cpu(&split.r_refcount, -1); 2203 2204 ret = ocfs2_split_refcount_rec(handle, ci, 2205 ref_root_bh, ref_leaf_bh, 2206 &split, index, 1, 2207 meta_ac, dealloc); 2208 } 2209 2210 if (ret) { 2211 mlog_errno(ret); 2212 goto out; 2213 } 2214 2215 /* Remove the leaf refcount block if it contains no refcount record. 
*/ 2216 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) { 2217 ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh, 2218 ref_leaf_bh, meta_ac, 2219 dealloc); 2220 if (ret) 2221 mlog_errno(ret); 2222 } 2223 2224 out: 2225 return ret; 2226 } 2227 2228 static int __ocfs2_decrease_refcount(handle_t *handle, 2229 struct ocfs2_caching_info *ci, 2230 struct buffer_head *ref_root_bh, 2231 u64 cpos, u32 len, 2232 struct ocfs2_alloc_context *meta_ac, 2233 struct ocfs2_cached_dealloc_ctxt *dealloc, 2234 int delete) 2235 { 2236 int ret = 0, index = 0; 2237 struct ocfs2_refcount_rec rec; 2238 unsigned int r_count = 0, r_len; 2239 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2240 struct buffer_head *ref_leaf_bh = NULL; 2241 2242 trace_ocfs2_decrease_refcount( 2243 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2244 (unsigned long long)cpos, len, delete); 2245 2246 while (len) { 2247 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2248 cpos, len, &rec, &index, 2249 &ref_leaf_bh); 2250 if (ret) { 2251 mlog_errno(ret); 2252 goto out; 2253 } 2254 2255 r_count = le32_to_cpu(rec.r_refcount); 2256 BUG_ON(r_count == 0); 2257 if (!delete) 2258 BUG_ON(r_count > 1); 2259 2260 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) + 2261 le32_to_cpu(rec.r_clusters)) - cpos; 2262 2263 ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh, 2264 ref_leaf_bh, index, 2265 cpos, r_len, 2266 meta_ac, dealloc); 2267 if (ret) { 2268 mlog_errno(ret); 2269 goto out; 2270 } 2271 2272 if (le32_to_cpu(rec.r_refcount) == 1 && delete) { 2273 ret = ocfs2_cache_cluster_dealloc(dealloc, 2274 ocfs2_clusters_to_blocks(sb, cpos), 2275 r_len); 2276 if (ret) { 2277 mlog_errno(ret); 2278 goto out; 2279 } 2280 } 2281 2282 cpos += r_len; 2283 len -= r_len; 2284 brelse(ref_leaf_bh); 2285 ref_leaf_bh = NULL; 2286 } 2287 2288 out: 2289 brelse(ref_leaf_bh); 2290 return ret; 2291 } 2292 2293 /* Caller must hold refcount tree lock. */ 2294 int ocfs2_decrease_refcount(struct inode *inode, 2295 handle_t *handle, u32 cpos, u32 len, 2296 struct ocfs2_alloc_context *meta_ac, 2297 struct ocfs2_cached_dealloc_ctxt *dealloc, 2298 int delete) 2299 { 2300 int ret; 2301 u64 ref_blkno; 2302 struct buffer_head *ref_root_bh = NULL; 2303 struct ocfs2_refcount_tree *tree; 2304 2305 BUG_ON(!ocfs2_is_refcount_inode(inode)); 2306 2307 ret = ocfs2_get_refcount_block(inode, &ref_blkno); 2308 if (ret) { 2309 mlog_errno(ret); 2310 goto out; 2311 } 2312 2313 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree); 2314 if (ret) { 2315 mlog_errno(ret); 2316 goto out; 2317 } 2318 2319 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, 2320 &ref_root_bh); 2321 if (ret) { 2322 mlog_errno(ret); 2323 goto out; 2324 } 2325 2326 ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh, 2327 cpos, len, meta_ac, dealloc, delete); 2328 if (ret) 2329 mlog_errno(ret); 2330 out: 2331 brelse(ref_root_bh); 2332 return ret; 2333 } 2334 2335 /* 2336 * Mark the already-existing extent at cpos as refcounted for len clusters. 2337 * This adds the refcount extent flag. 2338 * 2339 * If the existing extent is larger than the request, initiate a 2340 * split. An attempt will be made at merging with adjacent extents. 2341 * 2342 * The caller is responsible for passing down meta_ac if we'll need it. 
2343 */ 2344 static int ocfs2_mark_extent_refcounted(struct inode *inode, 2345 struct ocfs2_extent_tree *et, 2346 handle_t *handle, u32 cpos, 2347 u32 len, u32 phys, 2348 struct ocfs2_alloc_context *meta_ac, 2349 struct ocfs2_cached_dealloc_ctxt *dealloc) 2350 { 2351 int ret; 2352 2353 trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno, 2354 cpos, len, phys); 2355 2356 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2357 ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n", 2358 inode->i_ino); 2359 goto out; 2360 } 2361 2362 ret = ocfs2_change_extent_flag(handle, et, cpos, 2363 len, phys, meta_ac, dealloc, 2364 OCFS2_EXT_REFCOUNTED, 0); 2365 if (ret) 2366 mlog_errno(ret); 2367 2368 out: 2369 return ret; 2370 } 2371 2372 /* 2373 * Given some contiguous physical clusters, calculate what we need 2374 * for modifying their refcount. 2375 */ 2376 static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, 2377 struct ocfs2_caching_info *ci, 2378 struct buffer_head *ref_root_bh, 2379 u64 start_cpos, 2380 u32 clusters, 2381 int *meta_add, 2382 int *credits) 2383 { 2384 int ret = 0, index, ref_blocks = 0, recs_add = 0; 2385 u64 cpos = start_cpos; 2386 struct ocfs2_refcount_block *rb; 2387 struct ocfs2_refcount_rec rec; 2388 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; 2389 u32 len; 2390 2391 while (clusters) { 2392 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2393 cpos, clusters, &rec, 2394 &index, &ref_leaf_bh); 2395 if (ret) { 2396 mlog_errno(ret); 2397 goto out; 2398 } 2399 2400 if (ref_leaf_bh != prev_bh) { 2401 /* 2402 * We have encountered a new leaf block, so calculate 2403 * whether we need to extend the old leaf. 2404 */ 2405 if (prev_bh) { 2406 rb = (struct ocfs2_refcount_block *) 2407 prev_bh->b_data; 2408 2409 if (le16_to_cpu(rb->rf_records.rl_used) + 2410 recs_add > 2411 le16_to_cpu(rb->rf_records.rl_count)) 2412 ref_blocks++; 2413 } 2414 2415 recs_add = 0; 2416 *credits += 1; 2417 brelse(prev_bh); 2418 prev_bh = ref_leaf_bh; 2419 get_bh(prev_bh); 2420 } 2421 2422 trace_ocfs2_calc_refcount_meta_credits_iterate( 2423 recs_add, (unsigned long long)cpos, clusters, 2424 (unsigned long long)le64_to_cpu(rec.r_cpos), 2425 le32_to_cpu(rec.r_clusters), 2426 le32_to_cpu(rec.r_refcount), index); 2427 2428 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + 2429 le32_to_cpu(rec.r_clusters)) - cpos; 2430 /* 2431 * We record all the records which will be inserted into the 2432 * same refcount block, so that we can tell exactly whether 2433 * we need a new refcount block or not. 2434 * 2435 * If we will insert a new one, this is easy and it only happens 2436 * while adding the refcounted flag to the extent, so we don't 2437 * have a chance of splitting. We just need one record. 2438 * 2439 * If the refcount rec already exists, that would be a little 2440 * more complicated. We may have to: 2441 * 1) split at the beginning if the start pos isn't aligned. 2442 * We need 1 more record in this case. 2443 * 2) split at the end if the end pos isn't aligned. 2444 * We need 1 more record in this case. 2445 * 3) split in the middle because of file system fragmentation. 2446 * We need 2 more records in this case (we can't detect this 2447 * beforehand, so always assume the worst case). 2448 */ 2449 if (rec.r_refcount) { 2450 recs_add += 2; 2451 /* Check whether we need a split at the beginning.
*/ 2452 if (cpos == start_cpos && 2453 cpos != le64_to_cpu(rec.r_cpos)) 2454 recs_add++; 2455 2456 /* Check whether we need a split in the end. */ 2457 if (cpos + clusters < le64_to_cpu(rec.r_cpos) + 2458 le32_to_cpu(rec.r_clusters)) 2459 recs_add++; 2460 } else 2461 recs_add++; 2462 2463 brelse(ref_leaf_bh); 2464 ref_leaf_bh = NULL; 2465 clusters -= len; 2466 cpos += len; 2467 } 2468 2469 if (prev_bh) { 2470 rb = (struct ocfs2_refcount_block *)prev_bh->b_data; 2471 2472 if (le16_to_cpu(rb->rf_records.rl_used) + recs_add > 2473 le16_to_cpu(rb->rf_records.rl_count)) 2474 ref_blocks++; 2475 2476 *credits += 1; 2477 } 2478 2479 if (!ref_blocks) 2480 goto out; 2481 2482 *meta_add += ref_blocks; 2483 *credits += ref_blocks; 2484 2485 /* 2486 * So we may need to insert ref_blocks new leaf blocks into the tree. 2487 * That also means we need to change the b-tree and add that number 2488 * of records, since we never merge them. 2489 * We need one more block for the expansion, since the newly created 2490 * leaf block may itself be full and need a split. 2491 */ 2492 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 2493 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) { 2494 struct ocfs2_extent_tree et; 2495 2496 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2497 *meta_add += ocfs2_extend_meta_needed(et.et_root_el); 2498 *credits += ocfs2_calc_extend_credits(sb, 2499 et.et_root_el); 2500 } else { 2501 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 2502 *meta_add += 1; 2503 } 2504 2505 out: 2506 2507 trace_ocfs2_calc_refcount_meta_credits( 2508 (unsigned long long)start_cpos, clusters, 2509 *meta_add, *credits); 2510 brelse(ref_leaf_bh); 2511 brelse(prev_bh); 2512 return ret; 2513 } 2514 2515 /* 2516 * For a refcount tree, we will decrease the refcount of some contiguous 2517 * clusters, so just go through the tree to see how many blocks we are 2518 * going to touch and whether we need to create new blocks. 2519 * 2520 * Normally the refcount blocks storing these refcounts should be 2521 * contiguous as well, so we can get the number easily. 2522 * We will at most split 2 refcount records and add 2 more 2523 * refcount blocks, so just check it in a rough way. 2524 * 2525 * Caller must hold refcount tree lock.
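 * A rough, hypothetical example: freeing one range whose records sit in a single leaf splits at most the head and tail records, so rl_used grows by at most 2; only if that overflows rl_count do we budget an extra refcount block plus the credits to journal it.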
2526 */ 2527 int ocfs2_prepare_refcount_change_for_del(struct inode *inode, 2528 u64 refcount_loc, 2529 u64 phys_blkno, 2530 u32 clusters, 2531 int *credits, 2532 int *ref_blocks) 2533 { 2534 int ret; 2535 struct buffer_head *ref_root_bh = NULL; 2536 struct ocfs2_refcount_tree *tree; 2537 u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno); 2538 2539 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2540 ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n", 2541 inode->i_ino); 2542 goto out; 2543 } 2544 2545 BUG_ON(!ocfs2_is_refcount_inode(inode)); 2546 2547 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), 2548 refcount_loc, &tree); 2549 if (ret) { 2550 mlog_errno(ret); 2551 goto out; 2552 } 2553 2554 ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc, 2555 &ref_root_bh); 2556 if (ret) { 2557 mlog_errno(ret); 2558 goto out; 2559 } 2560 2561 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 2562 &tree->rf_ci, 2563 ref_root_bh, 2564 start_cpos, clusters, 2565 ref_blocks, credits); 2566 if (ret) { 2567 mlog_errno(ret); 2568 goto out; 2569 } 2570 2571 trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits); 2572 2573 out: 2574 brelse(ref_root_bh); 2575 return ret; 2576 } 2577 2578 #define MAX_CONTIG_BYTES 1048576 2579 2580 static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb) 2581 { 2582 return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES); 2583 } 2584 2585 static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb) 2586 { 2587 return ~(ocfs2_cow_contig_clusters(sb) - 1); 2588 } 2589 2590 /* 2591 * Given an extent that starts at 'start' and an I/O that starts at 'cpos', 2592 * find an offset (start + (n * contig_clusters)) that is closest to cpos 2593 * while still being less than or equal to it. 2594 * 2595 * The goal is to break the extent at a multiple of contig_clusters. 2596 */ 2597 static inline unsigned int ocfs2_cow_align_start(struct super_block *sb, 2598 unsigned int start, 2599 unsigned int cpos) 2600 { 2601 BUG_ON(start > cpos); 2602 2603 return start + ((cpos - start) & ocfs2_cow_contig_mask(sb)); 2604 } 2605 2606 /* 2607 * Given a cluster count of len, pad it out so that it is a multiple 2608 * of contig_clusters. 2609 */ 2610 static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, 2611 unsigned int len) 2612 { 2613 unsigned int padded = 2614 (len + (ocfs2_cow_contig_clusters(sb) - 1)) & 2615 ocfs2_cow_contig_mask(sb); 2616 2617 /* Did we wrap? */ 2618 if (padded < len) 2619 padded = UINT_MAX; 2620 2621 return padded; 2622 } 2623 2624 /* 2625 * Calculate the start and number of virtual clusters we need to CoW. 2626 * 2627 * cpos is the virtual start cluster position at which we want to do CoW in a 2628 * file and write_len is the cluster length. 2629 * max_cpos is the place where we want to stop CoW intentionally. 2630 * 2631 * Normally we will start CoW from the beginning of the extent record containing cpos. 2632 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we 2633 * get good I/O from the resulting extent tree.
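 * A hypothetical example: with 4K clusters, MAX_CONTIG_BYTES works out to contig_clusters = 256, so a small write into a huge refcounted extent is CoWed as one aligned 256-cluster chunk rather than as the whole extent.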
2634 */ 2635 static int ocfs2_refcount_cal_cow_clusters(struct inode *inode, 2636 struct ocfs2_extent_list *el, 2637 u32 cpos, 2638 u32 write_len, 2639 u32 max_cpos, 2640 u32 *cow_start, 2641 u32 *cow_len) 2642 { 2643 int ret = 0; 2644 int tree_height = le16_to_cpu(el->l_tree_depth), i; 2645 struct buffer_head *eb_bh = NULL; 2646 struct ocfs2_extent_block *eb = NULL; 2647 struct ocfs2_extent_rec *rec; 2648 unsigned int want_clusters, rec_end = 0; 2649 int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb); 2650 int leaf_clusters; 2651 2652 BUG_ON(cpos + write_len > max_cpos); 2653 2654 if (tree_height > 0) { 2655 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh); 2656 if (ret) { 2657 mlog_errno(ret); 2658 goto out; 2659 } 2660 2661 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2662 el = &eb->h_list; 2663 2664 if (el->l_tree_depth) { 2665 ret = ocfs2_error(inode->i_sb, 2666 "Inode %lu has non zero tree depth in leaf block %llu\n", 2667 inode->i_ino, 2668 (unsigned long long)eb_bh->b_blocknr); 2669 goto out; 2670 } 2671 } 2672 2673 *cow_len = 0; 2674 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 2675 rec = &el->l_recs[i]; 2676 2677 if (ocfs2_is_empty_extent(rec)) { 2678 mlog_bug_on_msg(i != 0, "Inode %lu has empty record in " 2679 "index %d\n", inode->i_ino, i); 2680 continue; 2681 } 2682 2683 if (le32_to_cpu(rec->e_cpos) + 2684 le16_to_cpu(rec->e_leaf_clusters) <= cpos) 2685 continue; 2686 2687 if (*cow_len == 0) { 2688 /* 2689 * We should find a refcounted record in the 2690 * first pass. 2691 */ 2692 BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED)); 2693 *cow_start = le32_to_cpu(rec->e_cpos); 2694 } 2695 2696 /* 2697 * If we encounter a hole, a non-refcounted record or 2698 * pass the max_cpos, stop the search. 2699 */ 2700 if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) || 2701 (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) || 2702 (max_cpos <= le32_to_cpu(rec->e_cpos))) 2703 break; 2704 2705 leaf_clusters = le16_to_cpu(rec->e_leaf_clusters); 2706 rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters; 2707 if (rec_end > max_cpos) { 2708 rec_end = max_cpos; 2709 leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos); 2710 } 2711 2712 /* 2713 * How many clusters do we actually need from 2714 * this extent? First we see how many we actually 2715 * need to complete the write. If that's smaller 2716 * than contig_clusters, we try for contig_clusters. 2717 */ 2718 if (!*cow_len) 2719 want_clusters = write_len; 2720 else 2721 want_clusters = (cpos + write_len) - 2722 (*cow_start + *cow_len); 2723 if (want_clusters < contig_clusters) 2724 want_clusters = contig_clusters; 2725 2726 /* 2727 * If the write does not cover the whole extent, we 2728 * need to calculate how we're going to split the extent. 2729 * We try to do it on contig_clusters boundaries. 2730 * 2731 * Any extent smaller than contig_clusters will be 2732 * CoWed in its entirety. 2733 */ 2734 if (leaf_clusters <= contig_clusters) 2735 *cow_len += leaf_clusters; 2736 else if (*cow_len || (*cow_start == cpos)) { 2737 /* 2738 * This extent needs to be CoW'd from its 2739 * beginning, so all we have to do is compute 2740 * how many clusters to grab. We align 2741 * want_clusters to the edge of contig_clusters 2742 * to get better I/O. 
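 * (A hypothetical example: want_clusters = 300 with contig_clusters = 256 is padded to 512, and we then grab min(512, leaf_clusters) from this extent.)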
2743 */ 2744 want_clusters = ocfs2_cow_align_length(inode->i_sb, 2745 want_clusters); 2746 2747 if (leaf_clusters < want_clusters) 2748 *cow_len += leaf_clusters; 2749 else 2750 *cow_len += want_clusters; 2751 } else if ((*cow_start + contig_clusters) >= 2752 (cpos + write_len)) { 2753 /* 2754 * Breaking off contig_clusters at the front 2755 * of the extent will cover our write. That's 2756 * easy. 2757 */ 2758 *cow_len = contig_clusters; 2759 } else if ((rec_end - cpos) <= contig_clusters) { 2760 /* 2761 * Breaking off contig_clusters at the tail of 2762 * this extent will cover cpos. 2763 */ 2764 *cow_start = rec_end - contig_clusters; 2765 *cow_len = contig_clusters; 2766 } else if ((rec_end - cpos) <= want_clusters) { 2767 /* 2768 * While we can't fit the entire write in this 2769 * extent, we know that the write goes from cpos 2770 * to the end of the extent. Break that off. 2771 * We try to break it at some multiple of 2772 * contig_clusters from the front of the extent. 2773 * Failing that (i.e., cpos is within 2774 * contig_clusters of the front), we'll CoW the 2775 * entire extent. 2776 */ 2777 *cow_start = ocfs2_cow_align_start(inode->i_sb, 2778 *cow_start, cpos); 2779 *cow_len = rec_end - *cow_start; 2780 } else { 2781 /* 2782 * Ok, the entire write lives in the middle of 2783 * this extent. Let's try to slice the extent up 2784 * nicely. Optimally, our CoW region starts at 2785 * m*contig_clusters from the beginning of the 2786 * extent and goes for n*contig_clusters, 2787 * covering the entire write. 2788 */ 2789 *cow_start = ocfs2_cow_align_start(inode->i_sb, 2790 *cow_start, cpos); 2791 2792 want_clusters = (cpos + write_len) - *cow_start; 2793 want_clusters = ocfs2_cow_align_length(inode->i_sb, 2794 want_clusters); 2795 if (*cow_start + want_clusters <= rec_end) 2796 *cow_len = want_clusters; 2797 else 2798 *cow_len = rec_end - *cow_start; 2799 } 2800 2801 /* Have we covered our entire write yet? */ 2802 if ((*cow_start + *cow_len) >= (cpos + write_len)) 2803 break; 2804 2805 /* 2806 * If we reach the end of the extent block and don't get enough 2807 * clusters, continue with the next extent block if possible. 2808 */ 2809 if (i + 1 == le16_to_cpu(el->l_next_free_rec) && 2810 eb && eb->h_next_leaf_blk) { 2811 brelse(eb_bh); 2812 eb_bh = NULL; 2813 2814 ret = ocfs2_read_extent_block(INODE_CACHE(inode), 2815 le64_to_cpu(eb->h_next_leaf_blk), 2816 &eb_bh); 2817 if (ret) { 2818 mlog_errno(ret); 2819 goto out; 2820 } 2821 2822 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2823 el = &eb->h_list; 2824 i = -1; 2825 } 2826 } 2827 2828 out: 2829 brelse(eb_bh); 2830 return ret; 2831 } 2832 2833 /* 2834 * Prepare meta_ac, data_ac and calculate credits when we want to add some 2835 * num_clusters in data_tree "et" and change the refcount for the old 2836 * clusters (starting from p_cluster) in the refcount tree. 2837 * 2838 * Note: 2839 * 1. Since we may split the old tree, we will need at most num_clusters + 2 2840 * new leaf records. 2841 * 2. In some cases we may not need to reserve new clusters (e.g. reflink), so 2842 * just pass data_ac = NULL.
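 * A hypothetical example: for num_clusters = 4 we check that the data b-tree has room for 4 + 2 = 6 new extent records before deciding whether extra metadata blocks must be reserved.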
2843 */ 2844 static int ocfs2_lock_refcount_allocators(struct super_block *sb, 2845 u32 p_cluster, u32 num_clusters, 2846 struct ocfs2_extent_tree *et, 2847 struct ocfs2_caching_info *ref_ci, 2848 struct buffer_head *ref_root_bh, 2849 struct ocfs2_alloc_context **meta_ac, 2850 struct ocfs2_alloc_context **data_ac, 2851 int *credits) 2852 { 2853 int ret = 0, meta_add = 0; 2854 int num_free_extents = ocfs2_num_free_extents(et); 2855 2856 if (num_free_extents < 0) { 2857 ret = num_free_extents; 2858 mlog_errno(ret); 2859 goto out; 2860 } 2861 2862 if (num_free_extents < num_clusters + 2) 2863 meta_add = 2864 ocfs2_extend_meta_needed(et->et_root_el); 2865 2866 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el); 2867 2868 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh, 2869 p_cluster, num_clusters, 2870 &meta_add, credits); 2871 if (ret) { 2872 mlog_errno(ret); 2873 goto out; 2874 } 2875 2876 trace_ocfs2_lock_refcount_allocators(meta_add, *credits); 2877 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, 2878 meta_ac); 2879 if (ret) { 2880 mlog_errno(ret); 2881 goto out; 2882 } 2883 2884 if (data_ac) { 2885 ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters, 2886 data_ac); 2887 if (ret) 2888 mlog_errno(ret); 2889 } 2890 2891 out: 2892 if (ret) { 2893 if (*meta_ac) { 2894 ocfs2_free_alloc_context(*meta_ac); 2895 *meta_ac = NULL; 2896 } 2897 } 2898 2899 return ret; 2900 } 2901 2902 static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) 2903 { 2904 BUG_ON(buffer_dirty(bh)); 2905 2906 clear_buffer_mapped(bh); 2907 2908 return 0; 2909 } 2910 2911 int ocfs2_duplicate_clusters_by_page(handle_t *handle, 2912 struct inode *inode, 2913 u32 cpos, u32 old_cluster, 2914 u32 new_cluster, u32 new_len) 2915 { 2916 int ret = 0, partial; 2917 struct super_block *sb = inode->i_sb; 2918 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 2919 struct page *page; 2920 pgoff_t page_index; 2921 unsigned int from, to; 2922 loff_t offset, end, map_end; 2923 struct address_space *mapping = inode->i_mapping; 2924 2925 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 2926 new_cluster, new_len); 2927 2928 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 2929 end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits); 2930 /* 2931 * We only duplicate pages until we reach the page that contains i_size - 1. 2932 * So trim 'end' to i_size. 2933 */ 2934 if (end > i_size_read(inode)) 2935 end = i_size_read(inode); 2936 2937 while (offset < end) { 2938 page_index = offset >> PAGE_SHIFT; 2939 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; 2940 if (map_end > end) 2941 map_end = end; 2942 2943 /* from and to are offsets within the page. */ 2944 from = offset & (PAGE_SIZE - 1); 2945 to = PAGE_SIZE; 2946 if (map_end & (PAGE_SIZE - 1)) 2947 to = map_end & (PAGE_SIZE - 1); 2948 2949 retry: 2950 page = find_or_create_page(mapping, page_index, GFP_NOFS); 2951 if (!page) { 2952 ret = -ENOMEM; 2953 mlog_errno(ret); 2954 break; 2955 } 2956 2957 /* 2958 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty 2959 * page here; if we find one, write it back.
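 * (write_one_page() below also unlocks the page, which is why we jump back to retry and look the page up again.)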
2960 */ 2961 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { 2962 if (PageDirty(page)) { 2963 /* 2964 * write_one_page will unlock the page on return 2965 */ 2966 ret = write_one_page(page); 2967 goto retry; 2968 } 2969 } 2970 2971 if (!PageUptodate(page)) { 2972 ret = block_read_full_page(page, ocfs2_get_block); 2973 if (ret) { 2974 mlog_errno(ret); 2975 goto unlock; 2976 } 2977 lock_page(page); 2978 } 2979 2980 if (page_has_buffers(page)) { 2981 ret = walk_page_buffers(handle, page_buffers(page), 2982 from, to, &partial, 2983 ocfs2_clear_cow_buffer); 2984 if (ret) { 2985 mlog_errno(ret); 2986 goto unlock; 2987 } 2988 } 2989 2990 ocfs2_map_and_dirty_page(inode, 2991 handle, from, to, 2992 page, 0, &new_block); 2993 mark_page_accessed(page); 2994 unlock: 2995 unlock_page(page); 2996 put_page(page); 2997 page = NULL; 2998 offset = map_end; 2999 if (ret) 3000 break; 3001 } 3002 3003 return ret; 3004 } 3005 3006 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 3007 struct inode *inode, 3008 u32 cpos, u32 old_cluster, 3009 u32 new_cluster, u32 new_len) 3010 { 3011 int ret = 0; 3012 struct super_block *sb = inode->i_sb; 3013 struct ocfs2_caching_info *ci = INODE_CACHE(inode); 3014 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); 3015 u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); 3016 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 3017 struct ocfs2_super *osb = OCFS2_SB(sb); 3018 struct buffer_head *old_bh = NULL; 3019 struct buffer_head *new_bh = NULL; 3020 3021 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 3022 new_cluster, new_len); 3023 3024 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3025 new_bh = sb_getblk(osb->sb, new_block); 3026 if (new_bh == NULL) { 3027 ret = -ENOMEM; 3028 mlog_errno(ret); 3029 break; 3030 } 3031 3032 ocfs2_set_new_buffer_uptodate(ci, new_bh); 3033 3034 ret = ocfs2_read_block(ci, old_block, &old_bh, NULL); 3035 if (ret) { 3036 mlog_errno(ret); 3037 break; 3038 } 3039 3040 ret = ocfs2_journal_access(handle, ci, new_bh, 3041 OCFS2_JOURNAL_ACCESS_CREATE); 3042 if (ret) { 3043 mlog_errno(ret); 3044 break; 3045 } 3046 3047 memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize); 3048 ocfs2_journal_dirty(handle, new_bh); 3049 3050 brelse(new_bh); 3051 brelse(old_bh); 3052 new_bh = NULL; 3053 old_bh = NULL; 3054 } 3055 3056 brelse(new_bh); 3057 brelse(old_bh); 3058 return ret; 3059 } 3060 3061 static int ocfs2_clear_ext_refcount(handle_t *handle, 3062 struct ocfs2_extent_tree *et, 3063 u32 cpos, u32 p_cluster, u32 len, 3064 unsigned int ext_flags, 3065 struct ocfs2_alloc_context *meta_ac, 3066 struct ocfs2_cached_dealloc_ctxt *dealloc) 3067 { 3068 int ret, index; 3069 struct ocfs2_extent_rec replace_rec; 3070 struct ocfs2_path *path = NULL; 3071 struct ocfs2_extent_list *el; 3072 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); 3073 u64 ino = ocfs2_metadata_cache_owner(et->et_ci); 3074 3075 trace_ocfs2_clear_ext_refcount((unsigned long long)ino, 3076 cpos, len, p_cluster, ext_flags); 3077 3078 memset(&replace_rec, 0, sizeof(replace_rec)); 3079 replace_rec.e_cpos = cpu_to_le32(cpos); 3080 replace_rec.e_leaf_clusters = cpu_to_le16(len); 3081 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb, 3082 p_cluster)); 3083 replace_rec.e_flags = ext_flags; 3084 replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED; 3085 3086 path = ocfs2_new_path_from_et(et); 3087 if (!path) { 3088 ret = -ENOMEM; 3089 mlog_errno(ret); 3090 goto out; 3091 } 3092 3093 ret = ocfs2_find_path(et->et_ci, path, cpos); 3094 if
(ret) { 3095 mlog_errno(ret); 3096 goto out; 3097 } 3098 3099 el = path_leaf_el(path); 3100 3101 index = ocfs2_search_extent_list(el, cpos); 3102 if (index == -1) { 3103 ret = ocfs2_error(sb, 3104 "Inode %llu has an extent at cpos %u which can no longer be found\n", 3105 (unsigned long long)ino, cpos); 3106 goto out; 3107 } 3108 3109 ret = ocfs2_split_extent(handle, et, path, index, 3110 &replace_rec, meta_ac, dealloc); 3111 if (ret) 3112 mlog_errno(ret); 3113 3114 out: 3115 ocfs2_free_path(path); 3116 return ret; 3117 } 3118 3119 static int ocfs2_replace_clusters(handle_t *handle, 3120 struct ocfs2_cow_context *context, 3121 u32 cpos, u32 old, 3122 u32 new, u32 len, 3123 unsigned int ext_flags) 3124 { 3125 int ret; 3126 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3127 u64 ino = ocfs2_metadata_cache_owner(ci); 3128 3129 trace_ocfs2_replace_clusters((unsigned long long)ino, 3130 cpos, old, new, len, ext_flags); 3131 3132 /* If the old clusters are unwritten, there is no need to duplicate. */ 3133 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3134 ret = context->cow_duplicate_clusters(handle, context->inode, 3135 cpos, old, new, len); 3136 if (ret) { 3137 mlog_errno(ret); 3138 goto out; 3139 } 3140 } 3141 3142 ret = ocfs2_clear_ext_refcount(handle, &context->data_et, 3143 cpos, new, len, ext_flags, 3144 context->meta_ac, &context->dealloc); 3145 if (ret) 3146 mlog_errno(ret); 3147 out: 3148 return ret; 3149 } 3150 3151 int ocfs2_cow_sync_writeback(struct super_block *sb, 3152 struct inode *inode, 3153 u32 cpos, u32 num_clusters) 3154 { 3155 int ret = 0; 3156 loff_t offset, end, map_end; 3157 pgoff_t page_index; 3158 struct page *page; 3159 3160 if (ocfs2_should_order_data(inode)) 3161 return 0; 3162 3163 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 3164 end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits); 3165 3166 ret = filemap_fdatawrite_range(inode->i_mapping, 3167 offset, end - 1); 3168 if (ret < 0) { 3169 mlog_errno(ret); 3170 return ret; 3171 } 3172 3173 while (offset < end) { 3174 page_index = offset >> PAGE_SHIFT; 3175 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; 3176 if (map_end > end) 3177 map_end = end; 3178 3179 page = find_or_create_page(inode->i_mapping, 3180 page_index, GFP_NOFS); 3181 BUG_ON(!page); 3182 3183 wait_on_page_writeback(page); 3184 if (PageError(page)) { 3185 ret = -EIO; 3186 mlog_errno(ret); 3187 } else 3188 mark_page_accessed(page); 3189 3190 unlock_page(page); 3191 put_page(page); 3192 page = NULL; 3193 offset = map_end; 3194 if (ret) 3195 break; 3196 } 3197 3198 return ret; 3199 } 3200 3201 static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context, 3202 u32 v_cluster, u32 *p_cluster, 3203 u32 *num_clusters, 3204 unsigned int *extent_flags) 3205 { 3206 return ocfs2_get_clusters(context->inode, v_cluster, p_cluster, 3207 num_clusters, extent_flags); 3208 } 3209 3210 static int ocfs2_make_clusters_writable(struct super_block *sb, 3211 struct ocfs2_cow_context *context, 3212 u32 cpos, u32 p_cluster, 3213 u32 num_clusters, unsigned int e_flags) 3214 { 3215 int ret, delete, index, credits = 0; 3216 u32 new_bit, new_len, orig_num_clusters; 3217 unsigned int set_len; 3218 struct ocfs2_super *osb = OCFS2_SB(sb); 3219 handle_t *handle; 3220 struct buffer_head *ref_leaf_bh = NULL; 3221 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; 3222 struct ocfs2_refcount_rec rec; 3223 3224 trace_ocfs2_make_clusters_writable(cpos, p_cluster, 3225 num_clusters, e_flags); 3226 3227 ret = ocfs2_lock_refcount_allocators(sb,
p_cluster, num_clusters, 3228 &context->data_et, 3229 ref_ci, 3230 context->ref_root_bh, 3231 &context->meta_ac, 3232 &context->data_ac, &credits); 3233 if (ret) { 3234 mlog_errno(ret); 3235 return ret; 3236 } 3237 3238 if (context->post_refcount) 3239 credits += context->post_refcount->credits; 3240 3241 credits += context->extra_credits; 3242 handle = ocfs2_start_trans(osb, credits); 3243 if (IS_ERR(handle)) { 3244 ret = PTR_ERR(handle); 3245 mlog_errno(ret); 3246 goto out; 3247 } 3248 3249 orig_num_clusters = num_clusters; 3250 3251 while (num_clusters) { 3252 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, 3253 p_cluster, num_clusters, 3254 &rec, &index, &ref_leaf_bh); 3255 if (ret) { 3256 mlog_errno(ret); 3257 goto out_commit; 3258 } 3259 3260 BUG_ON(!rec.r_refcount); 3261 set_len = min((u64)p_cluster + num_clusters, 3262 le64_to_cpu(rec.r_cpos) + 3263 le32_to_cpu(rec.r_clusters)) - p_cluster; 3264 3265 /* 3266 * There are several different situations here. 3267 * 1. If refcount == 1, remove the flag and don't CoW. 3268 * 2. If refcount > 1, allocate clusters. 3269 * We may not be able to allocate r_len clusters at a time, so continue 3270 * until we reach num_clusters. 3271 */ 3272 if (le32_to_cpu(rec.r_refcount) == 1) { 3273 delete = 0; 3274 ret = ocfs2_clear_ext_refcount(handle, 3275 &context->data_et, 3276 cpos, p_cluster, 3277 set_len, e_flags, 3278 context->meta_ac, 3279 &context->dealloc); 3280 if (ret) { 3281 mlog_errno(ret); 3282 goto out_commit; 3283 } 3284 } else { 3285 delete = 1; 3286 3287 ret = __ocfs2_claim_clusters(handle, 3288 context->data_ac, 3289 1, set_len, 3290 &new_bit, &new_len); 3291 if (ret) { 3292 mlog_errno(ret); 3293 goto out_commit; 3294 } 3295 3296 ret = ocfs2_replace_clusters(handle, context, 3297 cpos, p_cluster, new_bit, 3298 new_len, e_flags); 3299 if (ret) { 3300 mlog_errno(ret); 3301 goto out_commit; 3302 } 3303 set_len = new_len; 3304 } 3305 3306 ret = __ocfs2_decrease_refcount(handle, ref_ci, 3307 context->ref_root_bh, 3308 p_cluster, set_len, 3309 context->meta_ac, 3310 &context->dealloc, delete); 3311 if (ret) { 3312 mlog_errno(ret); 3313 goto out_commit; 3314 } 3315 3316 cpos += set_len; 3317 p_cluster += set_len; 3318 num_clusters -= set_len; 3319 brelse(ref_leaf_bh); 3320 ref_leaf_bh = NULL; 3321 } 3322 3323 /* Handle any post-CoW action. */ 3324 if (context->post_refcount && context->post_refcount->func) { 3325 ret = context->post_refcount->func(context->inode, handle, 3326 context->post_refcount->para); 3327 if (ret) { 3328 mlog_errno(ret); 3329 goto out_commit; 3330 } 3331 } 3332 3333 /* 3334 * Here we should write the new pages out first if we are 3335 * in write-back mode.
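 * (ocfs2_cow_sync_writeback() returns early under data=ordered, where the journal commit already orders the data; in write-back mode the explicit flush below is presumably what gets the duplicated pages to disk before the refcount change commits.)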
3336 */ 3337 if (context->get_clusters == ocfs2_di_get_clusters) { 3338 ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos, 3339 orig_num_clusters); 3340 if (ret) 3341 mlog_errno(ret); 3342 } 3343 3344 out_commit: 3345 ocfs2_commit_trans(osb, handle); 3346 3347 out: 3348 if (context->data_ac) { 3349 ocfs2_free_alloc_context(context->data_ac); 3350 context->data_ac = NULL; 3351 } 3352 if (context->meta_ac) { 3353 ocfs2_free_alloc_context(context->meta_ac); 3354 context->meta_ac = NULL; 3355 } 3356 brelse(ref_leaf_bh); 3357 3358 return ret; 3359 } 3360 3361 static int ocfs2_replace_cow(struct ocfs2_cow_context *context) 3362 { 3363 int ret = 0; 3364 struct inode *inode = context->inode; 3365 u32 cow_start = context->cow_start, cow_len = context->cow_len; 3366 u32 p_cluster, num_clusters; 3367 unsigned int ext_flags; 3368 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3369 3370 if (!ocfs2_refcount_tree(osb)) { 3371 return ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n", 3372 inode->i_ino); 3373 } 3374 3375 ocfs2_init_dealloc_ctxt(&context->dealloc); 3376 3377 while (cow_len) { 3378 ret = context->get_clusters(context, cow_start, &p_cluster, 3379 &num_clusters, &ext_flags); 3380 if (ret) { 3381 mlog_errno(ret); 3382 break; 3383 } 3384 3385 BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED)); 3386 3387 if (cow_len < num_clusters) 3388 num_clusters = cow_len; 3389 3390 ret = ocfs2_make_clusters_writable(inode->i_sb, context, 3391 cow_start, p_cluster, 3392 num_clusters, ext_flags); 3393 if (ret) { 3394 mlog_errno(ret); 3395 break; 3396 } 3397 3398 cow_len -= num_clusters; 3399 cow_start += num_clusters; 3400 } 3401 3402 if (ocfs2_dealloc_has_cluster(&context->dealloc)) { 3403 ocfs2_schedule_truncate_log_flush(osb, 1); 3404 ocfs2_run_deallocs(osb, &context->dealloc); 3405 } 3406 3407 return ret; 3408 } 3409 3410 /* 3411 * Starting at cpos, try to CoW write_len clusters. Don't CoW 3412 * past max_cpos. This will stop when it runs into a hole or an 3413 * unrefcounted extent.
3414 */ 3415 static int ocfs2_refcount_cow_hunk(struct inode *inode, 3416 struct buffer_head *di_bh, 3417 u32 cpos, u32 write_len, u32 max_cpos) 3418 { 3419 int ret; 3420 u32 cow_start = 0, cow_len = 0; 3421 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3422 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3423 struct buffer_head *ref_root_bh = NULL; 3424 struct ocfs2_refcount_tree *ref_tree; 3425 struct ocfs2_cow_context *context = NULL; 3426 3427 BUG_ON(!ocfs2_is_refcount_inode(inode)); 3428 3429 ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list, 3430 cpos, write_len, max_cpos, 3431 &cow_start, &cow_len); 3432 if (ret) { 3433 mlog_errno(ret); 3434 goto out; 3435 } 3436 3437 trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno, 3438 cpos, write_len, max_cpos, 3439 cow_start, cow_len); 3440 3441 BUG_ON(cow_len == 0); 3442 3443 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3444 if (!context) { 3445 ret = -ENOMEM; 3446 mlog_errno(ret); 3447 goto out; 3448 } 3449 3450 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 3451 1, &ref_tree, &ref_root_bh); 3452 if (ret) { 3453 mlog_errno(ret); 3454 goto out; 3455 } 3456 3457 context->inode = inode; 3458 context->cow_start = cow_start; 3459 context->cow_len = cow_len; 3460 context->ref_tree = ref_tree; 3461 context->ref_root_bh = ref_root_bh; 3462 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; 3463 context->get_clusters = ocfs2_di_get_clusters; 3464 3465 ocfs2_init_dinode_extent_tree(&context->data_et, 3466 INODE_CACHE(inode), di_bh); 3467 3468 ret = ocfs2_replace_cow(context); 3469 if (ret) 3470 mlog_errno(ret); 3471 3472 /* 3473 * Truncate the extent map here: no matter whether we hit an error 3474 * during the operation, we should not trust the cached extent map 3475 * any more. 3476 */ 3477 ocfs2_extent_map_trunc(inode, cow_start); 3478 3479 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3480 brelse(ref_root_bh); 3481 out: 3482 kfree(context); 3483 return ret; 3484 } 3485 3486 /* 3487 * CoW any and all clusters between cpos and cpos+write_len. 3488 * Don't CoW past max_cpos. If this returns successfully, all 3489 * clusters between cpos and cpos+write_len are safe to modify.
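 * A sketch of the expected call pattern (hypothetical, not verbatim from this file): a writer that is about to dirty cpos/len would call ocfs2_refcount_cow(inode, di_bh, cpos, len, UINT_MAX) first and only proceed on success.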
3490 */ 3491 int ocfs2_refcount_cow(struct inode *inode, 3492 struct buffer_head *di_bh, 3493 u32 cpos, u32 write_len, u32 max_cpos) 3494 { 3495 int ret = 0; 3496 u32 p_cluster, num_clusters; 3497 unsigned int ext_flags; 3498 3499 while (write_len) { 3500 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3501 &num_clusters, &ext_flags); 3502 if (ret) { 3503 mlog_errno(ret); 3504 break; 3505 } 3506 3507 if (write_len < num_clusters) 3508 num_clusters = write_len; 3509 3510 if (ext_flags & OCFS2_EXT_REFCOUNTED) { 3511 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, 3512 num_clusters, max_cpos); 3513 if (ret) { 3514 mlog_errno(ret); 3515 break; 3516 } 3517 } 3518 3519 write_len -= num_clusters; 3520 cpos += num_clusters; 3521 } 3522 3523 return ret; 3524 } 3525 3526 static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context, 3527 u32 v_cluster, u32 *p_cluster, 3528 u32 *num_clusters, 3529 unsigned int *extent_flags) 3530 { 3531 struct inode *inode = context->inode; 3532 struct ocfs2_xattr_value_root *xv = context->cow_object; 3533 3534 return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster, 3535 num_clusters, &xv->xr_list, 3536 extent_flags); 3537 } 3538 3539 /* 3540 * Given an xattr value root, calculate the most meta/credits we need for 3541 * the refcount tree change if we truncate it to 0. 3542 */ 3543 int ocfs2_refcounted_xattr_delete_need(struct inode *inode, 3544 struct ocfs2_caching_info *ref_ci, 3545 struct buffer_head *ref_root_bh, 3546 struct ocfs2_xattr_value_root *xv, 3547 int *meta_add, int *credits) 3548 { 3549 int ret = 0, index, ref_blocks = 0; 3550 u32 p_cluster, num_clusters; 3551 u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters); 3552 struct ocfs2_refcount_block *rb; 3553 struct ocfs2_refcount_rec rec; 3554 struct buffer_head *ref_leaf_bh = NULL; 3555 3556 while (cpos < clusters) { 3557 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, 3558 &num_clusters, &xv->xr_list, 3559 NULL); 3560 if (ret) { 3561 mlog_errno(ret); 3562 goto out; 3563 } 3564 3565 cpos += num_clusters; 3566 3567 while (num_clusters) { 3568 ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh, 3569 p_cluster, num_clusters, 3570 &rec, &index, 3571 &ref_leaf_bh); 3572 if (ret) { 3573 mlog_errno(ret); 3574 goto out; 3575 } 3576 3577 BUG_ON(!rec.r_refcount); 3578 3579 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 3580 3581 /* 3582 * We really don't know whether the other clusters are in 3583 * this refcount block or not, so just take the worst 3584 * case: all the clusters are in this block and each 3585 * one will split a refcount rec, so in total we need 3586 * clusters * 2 new refcount recs.
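 * For example (hypothetical numbers): truncating a 3-cluster value budgets for 3 * 2 = 6 new recs, so a leaf with rl_used + 6 > rl_count costs us one more refcount block.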
*/ 3588 if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 > 3589 le16_to_cpu(rb->rf_records.rl_count)) 3590 ref_blocks++; 3591 3592 *credits += 1; 3593 brelse(ref_leaf_bh); 3594 ref_leaf_bh = NULL; 3595 3596 if (num_clusters <= le32_to_cpu(rec.r_clusters)) 3597 break; 3598 else 3599 num_clusters -= le32_to_cpu(rec.r_clusters); 3600 p_cluster += num_clusters; 3601 } 3602 } 3603 3604 *meta_add += ref_blocks; 3605 if (!ref_blocks) 3606 goto out; 3607 3608 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 3609 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) 3610 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 3611 else { 3612 struct ocfs2_extent_tree et; 3613 3614 ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); 3615 *credits += ocfs2_calc_extend_credits(inode->i_sb, 3616 et.et_root_el); 3617 } 3618 3619 out: 3620 brelse(ref_leaf_bh); 3621 return ret; 3622 } 3623 3624 /* 3625 * Do CoW for an xattr. 3626 */ 3627 int ocfs2_refcount_cow_xattr(struct inode *inode, 3628 struct ocfs2_dinode *di, 3629 struct ocfs2_xattr_value_buf *vb, 3630 struct ocfs2_refcount_tree *ref_tree, 3631 struct buffer_head *ref_root_bh, 3632 u32 cpos, u32 write_len, 3633 struct ocfs2_post_refcount *post) 3634 { 3635 int ret; 3636 struct ocfs2_xattr_value_root *xv = vb->vb_xv; 3637 struct ocfs2_cow_context *context = NULL; 3638 u32 cow_start, cow_len; 3639 3640 BUG_ON(!ocfs2_is_refcount_inode(inode)); 3641 3642 ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list, 3643 cpos, write_len, UINT_MAX, 3644 &cow_start, &cow_len); 3645 if (ret) { 3646 mlog_errno(ret); 3647 goto out; 3648 } 3649 3650 BUG_ON(cow_len == 0); 3651 3652 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3653 if (!context) { 3654 ret = -ENOMEM; 3655 mlog_errno(ret); 3656 goto out; 3657 } 3658 3659 context->inode = inode; 3660 context->cow_start = cow_start; 3661 context->cow_len = cow_len; 3662 context->ref_tree = ref_tree; 3663 context->ref_root_bh = ref_root_bh; 3664 context->cow_object = xv; 3665 3666 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd; 3667 /* We need the extra credits for duplicating clusters by jbd. */ 3668 context->extra_credits = 3669 ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len; 3670 context->get_clusters = ocfs2_xattr_value_get_clusters; 3671 context->post_refcount = post; 3672 3673 ocfs2_init_xattr_value_extent_tree(&context->data_et, 3674 INODE_CACHE(inode), vb); 3675 3676 ret = ocfs2_replace_cow(context); 3677 if (ret) 3678 mlog_errno(ret); 3679 3680 out: 3681 kfree(context); 3682 return ret; 3683 } 3684 3685 /* 3686 * Insert a new extent into the refcount tree and mark an extent rec 3687 * as refcounted in the dinode tree. 3688 */ 3689 int ocfs2_add_refcount_flag(struct inode *inode, 3690 struct ocfs2_extent_tree *data_et, 3691 struct ocfs2_caching_info *ref_ci, 3692 struct buffer_head *ref_root_bh, 3693 u32 cpos, u32 p_cluster, u32 num_clusters, 3694 struct ocfs2_cached_dealloc_ctxt *dealloc, 3695 struct ocfs2_post_refcount *post) 3696 { 3697 int ret; 3698 handle_t *handle; 3699 int credits = 1, ref_blocks = 0; 3700 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3701 struct ocfs2_alloc_context *meta_ac = NULL; 3702 3703 /* We need to be able to handle at least an extent tree split.
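 * (ocfs2_extend_meta_needed() estimates that from the root extent list, roughly one new block per tree level plus some slack.)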
*/ 3704 ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el); 3705 3706 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 3707 ref_ci, ref_root_bh, 3708 p_cluster, num_clusters, 3709 &ref_blocks, &credits); 3710 if (ret) { 3711 mlog_errno(ret); 3712 goto out; 3713 } 3714 3715 trace_ocfs2_add_refcount_flag(ref_blocks, credits); 3716 3717 if (ref_blocks) { 3718 ret = ocfs2_reserve_new_metadata_blocks(osb, 3719 ref_blocks, &meta_ac); 3720 if (ret) { 3721 mlog_errno(ret); 3722 goto out; 3723 } 3724 } 3725 3726 if (post) 3727 credits += post->credits; 3728 3729 handle = ocfs2_start_trans(osb, credits); 3730 if (IS_ERR(handle)) { 3731 ret = PTR_ERR(handle); 3732 mlog_errno(ret); 3733 goto out; 3734 } 3735 3736 ret = ocfs2_mark_extent_refcounted(inode, data_et, handle, 3737 cpos, num_clusters, p_cluster, 3738 meta_ac, dealloc); 3739 if (ret) { 3740 mlog_errno(ret); 3741 goto out_commit; 3742 } 3743 3744 ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3745 p_cluster, num_clusters, 0, 3746 meta_ac, dealloc); 3747 if (ret) { 3748 mlog_errno(ret); 3749 goto out_commit; 3750 } 3751 3752 if (post && post->func) { 3753 ret = post->func(inode, handle, post->para); 3754 if (ret) 3755 mlog_errno(ret); 3756 } 3757 3758 out_commit: 3759 ocfs2_commit_trans(osb, handle); 3760 out: 3761 if (meta_ac) 3762 ocfs2_free_alloc_context(meta_ac); 3763 return ret; 3764 } 3765 3766 static int ocfs2_change_ctime(struct inode *inode, 3767 struct buffer_head *di_bh) 3768 { 3769 int ret; 3770 handle_t *handle; 3771 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3772 3773 handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), 3774 OCFS2_INODE_UPDATE_CREDITS); 3775 if (IS_ERR(handle)) { 3776 ret = PTR_ERR(handle); 3777 mlog_errno(ret); 3778 goto out; 3779 } 3780 3781 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 3782 OCFS2_JOURNAL_ACCESS_WRITE); 3783 if (ret) { 3784 mlog_errno(ret); 3785 goto out_commit; 3786 } 3787 3788 inode->i_ctime = current_time(inode); 3789 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); 3790 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); 3791 3792 ocfs2_journal_dirty(handle, di_bh); 3793 3794 out_commit: 3795 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 3796 out: 3797 return ret; 3798 } 3799 3800 static int ocfs2_attach_refcount_tree(struct inode *inode, 3801 struct buffer_head *di_bh) 3802 { 3803 int ret, data_changed = 0; 3804 struct buffer_head *ref_root_bh = NULL; 3805 struct ocfs2_inode_info *oi = OCFS2_I(inode); 3806 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3807 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3808 struct ocfs2_refcount_tree *ref_tree; 3809 unsigned int ext_flags; 3810 loff_t size; 3811 u32 cpos, num_clusters, clusters, p_cluster; 3812 struct ocfs2_cached_dealloc_ctxt dealloc; 3813 struct ocfs2_extent_tree di_et; 3814 3815 ocfs2_init_dealloc_ctxt(&dealloc); 3816 3817 if (!ocfs2_is_refcount_inode(inode)) { 3818 ret = ocfs2_create_refcount_tree(inode, di_bh); 3819 if (ret) { 3820 mlog_errno(ret); 3821 goto out; 3822 } 3823 } 3824 3825 BUG_ON(!di->i_refcount_loc); 3826 ret = ocfs2_lock_refcount_tree(osb, 3827 le64_to_cpu(di->i_refcount_loc), 1, 3828 &ref_tree, &ref_root_bh); 3829 if (ret) { 3830 mlog_errno(ret); 3831 goto out; 3832 } 3833 3834 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) 3835 goto attach_xattr; 3836 3837 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3838 3839 size = i_size_read(inode); 3840 clusters = ocfs2_clusters_for_bytes(inode->i_sb, size); 3841 
3842 cpos = 0; 3843 while (cpos < clusters) { 3844 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3845 &num_clusters, &ext_flags); 3846 if (ret) { 3847 mlog_errno(ret); 3848 goto unlock; 3849 } 3850 if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { 3851 ret = ocfs2_add_refcount_flag(inode, &di_et, 3852 &ref_tree->rf_ci, 3853 ref_root_bh, cpos, 3854 p_cluster, num_clusters, 3855 &dealloc, NULL); 3856 if (ret) { 3857 mlog_errno(ret); 3858 goto unlock; 3859 } 3860 3861 data_changed = 1; 3862 } 3863 cpos += num_clusters; 3864 } 3865 3866 attach_xattr: 3867 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3868 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3869 &ref_tree->rf_ci, 3870 ref_root_bh, 3871 &dealloc); 3872 if (ret) { 3873 mlog_errno(ret); 3874 goto unlock; 3875 } 3876 } 3877 3878 if (data_changed) { 3879 ret = ocfs2_change_ctime(inode, di_bh); 3880 if (ret) 3881 mlog_errno(ret); 3882 } 3883 3884 unlock: 3885 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3886 brelse(ref_root_bh); 3887 3888 if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) { 3889 ocfs2_schedule_truncate_log_flush(osb, 1); 3890 ocfs2_run_deallocs(osb, &dealloc); 3891 } 3892 out: 3893 /* 3894 * Empty the extent map so that we may get the right extent 3895 * record from the disk. 3896 */ 3897 ocfs2_extent_map_trunc(inode, 0); 3898 3899 return ret; 3900 } 3901 3902 static int ocfs2_add_refcounted_extent(struct inode *inode, 3903 struct ocfs2_extent_tree *et, 3904 struct ocfs2_caching_info *ref_ci, 3905 struct buffer_head *ref_root_bh, 3906 u32 cpos, u32 p_cluster, u32 num_clusters, 3907 unsigned int ext_flags, 3908 struct ocfs2_cached_dealloc_ctxt *dealloc) 3909 { 3910 int ret; 3911 handle_t *handle; 3912 int credits = 0; 3913 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3914 struct ocfs2_alloc_context *meta_ac = NULL; 3915 3916 ret = ocfs2_lock_refcount_allocators(inode->i_sb, 3917 p_cluster, num_clusters, 3918 et, ref_ci, 3919 ref_root_bh, &meta_ac, 3920 NULL, &credits); 3921 if (ret) { 3922 mlog_errno(ret); 3923 goto out; 3924 } 3925 3926 handle = ocfs2_start_trans(osb, credits); 3927 if (IS_ERR(handle)) { 3928 ret = PTR_ERR(handle); 3929 mlog_errno(ret); 3930 goto out; 3931 } 3932 3933 ret = ocfs2_insert_extent(handle, et, cpos, 3934 ocfs2_clusters_to_blocks(inode->i_sb, p_cluster), 3935 num_clusters, ext_flags, meta_ac); 3936 if (ret) { 3937 mlog_errno(ret); 3938 goto out_commit; 3939 } 3940 3941 ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3942 p_cluster, num_clusters, 3943 meta_ac, dealloc); 3944 if (ret) { 3945 mlog_errno(ret); 3946 goto out_commit; 3947 } 3948 3949 ret = dquot_alloc_space_nodirty(inode, 3950 ocfs2_clusters_to_bytes(osb->sb, num_clusters)); 3951 if (ret) 3952 mlog_errno(ret); 3953 3954 out_commit: 3955 ocfs2_commit_trans(osb, handle); 3956 out: 3957 if (meta_ac) 3958 ocfs2_free_alloc_context(meta_ac); 3959 return ret; 3960 } 3961 3962 static int ocfs2_duplicate_inline_data(struct inode *s_inode, 3963 struct buffer_head *s_bh, 3964 struct inode *t_inode, 3965 struct buffer_head *t_bh) 3966 { 3967 int ret; 3968 handle_t *handle; 3969 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 3970 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 3971 struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data; 3972 3973 BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); 3974 3975 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 3976 if (IS_ERR(handle)) { 3977 ret = PTR_ERR(handle); 3978 mlog_errno(ret); 3979 goto out; 3980 } 3981 
3982 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 3983 OCFS2_JOURNAL_ACCESS_WRITE); 3984 if (ret) { 3985 mlog_errno(ret); 3986 goto out_commit; 3987 } 3988 3989 t_di->id2.i_data.id_count = s_di->id2.i_data.id_count; 3990 memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data, 3991 le16_to_cpu(s_di->id2.i_data.id_count)); 3992 spin_lock(&OCFS2_I(t_inode)->ip_lock); 3993 OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL; 3994 t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features); 3995 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 3996 3997 ocfs2_journal_dirty(handle, t_bh); 3998 3999 out_commit: 4000 ocfs2_commit_trans(osb, handle); 4001 out: 4002 return ret; 4003 } 4004 4005 static int ocfs2_duplicate_extent_list(struct inode *s_inode, 4006 struct inode *t_inode, 4007 struct buffer_head *t_bh, 4008 struct ocfs2_caching_info *ref_ci, 4009 struct buffer_head *ref_root_bh, 4010 struct ocfs2_cached_dealloc_ctxt *dealloc) 4011 { 4012 int ret = 0; 4013 u32 p_cluster, num_clusters, clusters, cpos; 4014 loff_t size; 4015 unsigned int ext_flags; 4016 struct ocfs2_extent_tree et; 4017 4018 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh); 4019 4020 size = i_size_read(s_inode); 4021 clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size); 4022 4023 cpos = 0; 4024 while (cpos < clusters) { 4025 ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, 4026 &num_clusters, &ext_flags); 4027 if (ret) { 4028 mlog_errno(ret); 4029 goto out; 4030 } 4031 if (p_cluster) { 4032 ret = ocfs2_add_refcounted_extent(t_inode, &et, 4033 ref_ci, ref_root_bh, 4034 cpos, p_cluster, 4035 num_clusters, 4036 ext_flags, 4037 dealloc); 4038 if (ret) { 4039 mlog_errno(ret); 4040 goto out; 4041 } 4042 } 4043 4044 cpos += num_clusters; 4045 } 4046 4047 out: 4048 return ret; 4049 } 4050 4051 /* 4052 * Change the new file's attributes to match the source. 4053 * 4054 * A reflink creates a snapshot of a file, which means the attributes 4055 * must be identical except for three exceptions: nlink, ino, and ctime.
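 * E.g. with preserve set, uid/gid/mode and mtime are copied from the source below, and only ctime is set to the current time.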
4056 */ 4057 static int ocfs2_complete_reflink(struct inode *s_inode, 4058 struct buffer_head *s_bh, 4059 struct inode *t_inode, 4060 struct buffer_head *t_bh, 4061 bool preserve) 4062 { 4063 int ret; 4064 handle_t *handle; 4065 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 4066 struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data; 4067 loff_t size = i_size_read(s_inode); 4068 4069 handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb), 4070 OCFS2_INODE_UPDATE_CREDITS); 4071 if (IS_ERR(handle)) { 4072 ret = PTR_ERR(handle); 4073 mlog_errno(ret); 4074 return ret; 4075 } 4076 4077 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 4078 OCFS2_JOURNAL_ACCESS_WRITE); 4079 if (ret) { 4080 mlog_errno(ret); 4081 goto out_commit; 4082 } 4083 4084 spin_lock(&OCFS2_I(t_inode)->ip_lock); 4085 OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters; 4086 OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr; 4087 OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; 4088 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 4089 i_size_write(t_inode, size); 4090 t_inode->i_blocks = s_inode->i_blocks; 4091 4092 di->i_xattr_inline_size = s_di->i_xattr_inline_size; 4093 di->i_clusters = s_di->i_clusters; 4094 di->i_size = s_di->i_size; 4095 di->i_dyn_features = s_di->i_dyn_features; 4096 di->i_attr = s_di->i_attr; 4097 4098 if (preserve) { 4099 t_inode->i_uid = s_inode->i_uid; 4100 t_inode->i_gid = s_inode->i_gid; 4101 t_inode->i_mode = s_inode->i_mode; 4102 di->i_uid = s_di->i_uid; 4103 di->i_gid = s_di->i_gid; 4104 di->i_mode = s_di->i_mode; 4105 4106 /* 4107 * Update times: 4108 * we want mtime to appear identical to the source, and we 4109 * update ctime. 4110 */ 4111 t_inode->i_ctime = current_time(t_inode); 4112 4113 di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec); 4114 di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec); 4115 4116 t_inode->i_mtime = s_inode->i_mtime; 4117 di->i_mtime = s_di->i_mtime; 4118 di->i_mtime_nsec = s_di->i_mtime_nsec; 4119 } 4120 4121 ocfs2_journal_dirty(handle, t_bh); 4122 4123 out_commit: 4124 ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle); 4125 return ret; 4126 } 4127 4128 static int ocfs2_create_reflink_node(struct inode *s_inode, 4129 struct buffer_head *s_bh, 4130 struct inode *t_inode, 4131 struct buffer_head *t_bh, 4132 bool preserve) 4133 { 4134 int ret; 4135 struct buffer_head *ref_root_bh = NULL; 4136 struct ocfs2_cached_dealloc_ctxt dealloc; 4137 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 4138 struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data; 4139 struct ocfs2_refcount_tree *ref_tree; 4140 4141 ocfs2_init_dealloc_ctxt(&dealloc); 4142 4143 ret = ocfs2_set_refcount_tree(t_inode, t_bh, 4144 le64_to_cpu(di->i_refcount_loc)); 4145 if (ret) { 4146 mlog_errno(ret); 4147 goto out; 4148 } 4149 4150 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 4151 ret = ocfs2_duplicate_inline_data(s_inode, s_bh, 4152 t_inode, t_bh); 4153 if (ret) 4154 mlog_errno(ret); 4155 goto out; 4156 } 4157 4158 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4159 1, &ref_tree, &ref_root_bh); 4160 if (ret) { 4161 mlog_errno(ret); 4162 goto out; 4163 } 4164 4165 ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh, 4166 &ref_tree->rf_ci, ref_root_bh, 4167 &dealloc); 4168 if (ret) { 4169 mlog_errno(ret); 4170 goto out_unlock_refcount; 4171 } 4172 4173 out_unlock_refcount: 4174 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4175 brelse(ref_root_bh); 4176 out: 4177 if
static int ocfs2_create_reflink_node(struct inode *s_inode,
				     struct buffer_head *s_bh,
				     struct inode *t_inode,
				     struct buffer_head *t_bh,
				     bool preserve)
{
	int ret;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
				      le64_to_cpu(di->i_refcount_loc));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
						  t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
					  &ref_tree->rf_ci, ref_root_bh,
					  &dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_refcount;
	}

out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}

static int __ocfs2_reflink(struct dentry *old_dentry,
			   struct buffer_head *old_bh,
			   struct inode *new_inode,
			   bool preserve)
{
	int ret;
	struct inode *inode = d_inode(old_dentry);
	struct buffer_head *new_bh = NULL;

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	ret = filemap_fdatawrite(inode->i_mapping);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_attach_refcount_tree(inode, old_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	inode_lock_nested(new_inode, I_MUTEX_CHILD);
	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
				      OI_LS_REFLINK_TARGET);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_create_reflink_node(inode, old_bh,
					new_inode, new_bh, preserve);
	if (ret) {
		mlog_errno(ret);
		goto inode_unlock;
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
		ret = ocfs2_reflink_xattrs(inode, old_bh,
					   new_inode, new_bh,
					   preserve);
		if (ret) {
			mlog_errno(ret);
			goto inode_unlock;
		}
	}

	ret = ocfs2_complete_reflink(inode, old_bh,
				     new_inode, new_bh, preserve);
	if (ret)
		mlog_errno(ret);

inode_unlock:
	ocfs2_inode_unlock(new_inode, 1);
	brelse(new_bh);
out_unlock:
	inode_unlock(new_inode);
out:
	if (!ret) {
		ret = filemap_fdatawait(inode->i_mapping);
		if (ret)
			mlog_errno(ret);
	}
	return ret;
}
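/*
 * Top-level reflink driver. Note the lock ordering used below, from
 * outermost to innermost: the source's rw lock, then its cluster inode
 * lock, then ip_xattr_sem, and finally ip_alloc_sem. The new inode is
 * created in the orphan directory first and only moved into place once
 * the reflink has fully succeeded, so a crash midway leaves an orphan
 * to be reclaimed rather than a half-built file.
 */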
static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry, bool preserve)
{
	int error, had_lock;
	struct inode *inode = d_inode(old_dentry);
	struct buffer_head *old_bh = NULL;
	struct inode *new_orphan_inode = NULL;
	struct ocfs2_lock_holder oh;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
					     &new_orphan_inode);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_rw_lock(inode, 1);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_inode_lock(inode, &old_bh, 1);
	if (error) {
		mlog_errno(error);
		ocfs2_rw_unlock(inode, 1);
		goto out;
	}

	down_write(&OCFS2_I(inode)->ip_xattr_sem);
	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	error = __ocfs2_reflink(old_dentry, old_bh,
				new_orphan_inode, preserve);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	up_write(&OCFS2_I(inode)->ip_xattr_sem);

	ocfs2_inode_unlock(inode, 1);
	ocfs2_rw_unlock(inode, 1);
	brelse(old_bh);

	if (error) {
		mlog_errno(error);
		goto out;
	}

	had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
					    &oh);
	if (had_lock < 0) {
		error = had_lock;
		mlog_errno(error);
		goto out;
	}

	/* If security isn't preserved, we need to re-initialize it. */
	if (!preserve) {
		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
						    &new_dentry->d_name);
		if (error)
			mlog_errno(error);
	}
	if (!error) {
		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
						       new_dentry);
		if (error)
			mlog_errno(error);
	}
	ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);

out:
	if (new_orphan_inode) {
		/*
		 * We need to open_unlock the inode no matter whether we
		 * succeed or not, so that other nodes can delete it later.
		 */
		ocfs2_open_unlock(new_orphan_inode);
		if (error)
			iput(new_orphan_inode);
	}

	return error;
}

/*
 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
 * sys_reflink(). This will go away when vfs_reflink() exists in
 * fs/namei.c.
 */

/* copied from may_create in VFS. */
static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/**
 * ocfs2_vfs_reflink - Create a reference-counted link
 *
 * @old_dentry:	source dentry + inode
 * @dir:	directory in which to create the target
 * @new_dentry:	target dentry
 * @preserve:	if true, preserve all file attributes
 */
static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
			     struct dentry *new_dentry, bool preserve)
{
	struct inode *inode = d_inode(old_dentry);
	int error;

	if (!inode)
		return -ENOENT;

	error = ocfs2_may_create(dir, new_dentry);
	if (error)
		return error;

	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A reflink to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Only regular files can be reflinked. */
	if (!S_ISREG(inode->i_mode))
		return -EPERM;

	/*
	 * If the caller wants to preserve ownership, it must have the
	 * rights to do so.
	 */
	if (preserve) {
		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
			return -EPERM;
		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
			return -EPERM;
	}

	/*
	 * If the caller is modifying any aspect of the attributes, they
	 * are not creating a snapshot. They need read permission on the
	 * file.
	 */
	if (!preserve) {
		error = inode_permission(inode, MAY_READ);
		if (error)
			return error;
	}

	inode_lock(inode);
	error = dquot_initialize(dir);
	if (!error)
		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
	inode_unlock(inode);
	if (!error)
		fsnotify_create(dir, new_dentry);
	return error;
}
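/*
 * Illustrative userspace sketch (not part of this file): how the
 * reflink ioctl below is expected to be driven, assuming the
 * reflink_arguments layout from ocfs2_fs.h:
 *
 *	struct reflink_arguments args = {
 *		.old_path = (unsigned long)"/mnt/ocfs2/src",
 *		.new_path = (unsigned long)"/mnt/ocfs2/snap",
 *		.preserve = 1,		// keep owner/mode/mtime
 *	};
 *	int fd = open("/mnt/ocfs2/src", O_RDONLY);
 *	ioctl(fd, OCFS2_IOC_REFLINK, &args);
 *
 * Both paths must live on the same mounted ocfs2 filesystem, or the
 * kernel side below returns -EXDEV.
 */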
/*
 * Most of this code is copied from sys_linkat.
 */
int ocfs2_reflink_ioctl(struct inode *inode,
			const char __user *oldname,
			const char __user *newname,
			bool preserve)
{
	struct dentry *new_dentry;
	struct path old_path, new_path;
	int error;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
	if (error) {
		mlog_errno(error);
		return error;
	}

	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry)) {
		mlog_errno(error);
		goto out;
	}

	error = -EXDEV;
	if (old_path.mnt != new_path.mnt) {
		mlog_errno(error);
		goto out_dput;
	}

	error = ocfs2_vfs_reflink(old_path.dentry,
				  d_inode(new_path.dentry),
				  new_dentry, preserve);
out_dput:
	done_path_create(&new_path, new_dentry);
out:
	path_put(&old_path);

	return error;
}

/* Update destination inode size, if necessary. */
int ocfs2_reflink_update_dest(struct inode *dest,
			      struct buffer_head *d_bh,
			      loff_t newlen)
{
	handle_t *handle;
	int ret;

	dest->i_blocks = ocfs2_inode_sector_count(dest);

	if (newlen <= i_size_read(dest))
		return 0;

	handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		return ret;
	}

	/* Extend i_size if needed. */
	spin_lock(&OCFS2_I(dest)->ip_lock);
	if (newlen > i_size_read(dest))
		i_size_write(dest, newlen);
	spin_unlock(&OCFS2_I(dest)->ip_lock);
	dest->i_ctime = dest->i_mtime = current_time(dest);

	ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

out_commit:
	ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
	return ret;
}
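/*
 * The remap loop below performs three steps per source extent: punch
 * out the matching range in the destination, tag the source extent as
 * refcounted if it is not already shared, and then insert the same
 * physical clusters into the destination's extent tree with
 * OCFS2_EXT_REFCOUNTED set. Holes in the source (p_cluster == 0) stay
 * holes in the destination, because the punched range is never
 * re-mapped.
 */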
/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
					 struct buffer_head *s_bh,
					 loff_t pos_in,
					 struct inode *t_inode,
					 struct buffer_head *t_bh,
					 loff_t pos_out,
					 loff_t len,
					 struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	struct ocfs2_extent_tree s_et;
	struct ocfs2_extent_tree t_et;
	struct ocfs2_dinode *dis;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	struct ocfs2_super *osb;
	loff_t remapped_bytes = 0;
	loff_t pstart, plen;
	u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
	unsigned int ext_flags;
	int ret = 0;

	osb = OCFS2_SB(s_inode->i_sb);
	dis = (struct ocfs2_dinode *)s_bh->b_data;
	ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
	ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);

	spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
	tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
	slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);

	while (spos < slast) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		/* Look up the extent. */
		ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		num_clusters = min_t(u32, num_clusters, slast - spos);

		/* Punch out the dest range. */
		pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
		plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
		ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (p_cluster == 0)
			goto next_loop;

		/* Lock the refcount btree... */
		ret = ocfs2_lock_refcount_tree(osb,
					       le64_to_cpu(dis->i_refcount_loc),
					       1, &ref_tree, &ref_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Mark s_inode's extent as refcounted. */
		if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = ocfs2_add_refcount_flag(s_inode, &s_et,
						      &ref_tree->rf_ci,
						      ref_root_bh, spos,
						      p_cluster, num_clusters,
						      dealloc, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out_unlock_refcount;
			}
		}

		/* Map in the new extent. */
		ext_flags |= OCFS2_EXT_REFCOUNTED;
		ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
						  &ref_tree->rf_ci,
						  ref_root_bh,
						  tpos, p_cluster,
						  num_clusters,
						  ext_flags,
						  dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock_refcount;
		}

		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
		brelse(ref_root_bh);
next_loop:
		spos += num_clusters;
		tpos += num_clusters;
		remapped_clus += num_clusters;
	}

	goto out;
out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
	remapped_bytes = min_t(loff_t, len, remapped_bytes);

	return remapped_bytes > 0 ? remapped_bytes : ret;
}
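/*
 * Before any extents can be shared, both inodes must reference the
 * same refcount tree. The setup below handles the four cases:
 *
 *	neither inode has a tree  -> create one on the source, then
 *				     point the target at it
 *	only the source has one   -> point the target at it
 *	only the target has one   -> point the source at it
 *	both have different trees -> -EOPNOTSUPP; merging refcount
 *				     trees is not supported
 */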
/* Set up refcount tree and remap s_inode to t_inode. */
loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
				  struct buffer_head *s_bh,
				  loff_t pos_in,
				  struct inode *t_inode,
				  struct buffer_head *t_bh,
				  loff_t pos_out,
				  loff_t len)
{
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb;
	struct ocfs2_dinode *dis;
	struct ocfs2_dinode *dit;
	loff_t ret;

	osb = OCFS2_SB(s_inode->i_sb);
	dis = (struct ocfs2_dinode *)s_bh->b_data;
	dit = (struct ocfs2_dinode *)t_bh->b_data;
	ocfs2_init_dealloc_ctxt(&dealloc);

	/*
	 * If we're reflinking the entire file and the source is inline
	 * data, just copy the contents.
	 */
	if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
	    i_size_read(t_inode) <= len &&
	    (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * If the two inodes already belong to different refcount trees,
	 * bail out; we don't know how (or want) to merge refcount trees.
	 */
	ret = -EOPNOTSUPP;
	if (ocfs2_is_refcount_inode(s_inode) &&
	    ocfs2_is_refcount_inode(t_inode) &&
	    le64_to_cpu(dis->i_refcount_loc) !=
	    le64_to_cpu(dit->i_refcount_loc))
		goto out;

	/* Neither inode has a refcount tree. Add one to s_inode. */
	if (!ocfs2_is_refcount_inode(s_inode) &&
	    !ocfs2_is_refcount_inode(t_inode)) {
		ret = ocfs2_create_refcount_tree(s_inode, s_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Ensure that both inodes end up with the same refcount tree. */
	if (!ocfs2_is_refcount_inode(s_inode)) {
		ret = ocfs2_set_refcount_tree(s_inode, s_bh,
					      le64_to_cpu(dit->i_refcount_loc));
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}
	if (!ocfs2_is_refcount_inode(t_inode)) {
		ret = ocfs2_set_refcount_tree(t_inode, t_bh,
					      le64_to_cpu(dis->i_refcount_loc));
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Turn off inline data in the dest file. */
	if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Actually remap extents now. */
	ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
					 pos_out, len, &dealloc);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}
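/*
 * Lock ordering note for the helper below: the VFS inode locks are
 * taken first via lock_two_nondirectories(), then the rw locks, and
 * finally the cluster inode locks, always acquiring the inode with the
 * lower lockid first so that two nodes racing to reflink the same pair
 * of files cannot deadlock against each other.
 */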
/* Lock an inode and grab a bh pointing to the inode. */
int ocfs2_reflink_inodes_lock(struct inode *s_inode,
			      struct buffer_head **bh1,
			      struct inode *t_inode,
			      struct buffer_head **bh2)
{
	struct inode *inode1;
	struct inode *inode2;
	struct ocfs2_inode_info *oi1;
	struct ocfs2_inode_info *oi2;
	bool same_inode = (s_inode == t_inode);
	int status;

	/* First grab the VFS and rw locks. */
	lock_two_nondirectories(s_inode, t_inode);
	inode1 = s_inode;
	inode2 = t_inode;
	if (inode1->i_ino > inode2->i_ino)
		swap(inode1, inode2);

	status = ocfs2_rw_lock(inode1, 1);
	if (status) {
		mlog_errno(status);
		goto out_i1;
	}
	if (!same_inode) {
		status = ocfs2_rw_lock(inode2, 1);
		if (status) {
			mlog_errno(status);
			goto out_i2;
		}
	}

	/* Now go for the cluster locks. */
	oi1 = OCFS2_I(inode1);
	oi2 = OCFS2_I(inode2);

	trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
				(unsigned long long)oi2->ip_blkno);

	if (*bh1)
		*bh1 = NULL;
	if (*bh2)
		*bh2 = NULL;

	/* We always want to lock the one with the lower lockid first. */
	if (oi1->ip_blkno > oi2->ip_blkno)
		mlog_errno(-ENOLCK);

	/* lock id1 */
	status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto out_rw2;
	}

	/* lock id2 */
	if (!same_inode) {
		status = ocfs2_inode_lock_nested(inode2, bh2, 1,
						 OI_LS_REFLINK_TARGET);
		if (status < 0) {
			if (status != -ENOENT)
				mlog_errno(status);
			goto out_cl1;
		}
	} else
		*bh2 = *bh1;

	trace_ocfs2_double_lock_end(
			(unsigned long long)oi1->ip_blkno,
			(unsigned long long)oi2->ip_blkno);

	return 0;

out_cl1:
	ocfs2_inode_unlock(inode1, 1);
	brelse(*bh1);
	*bh1 = NULL;
out_rw2:
	ocfs2_rw_unlock(inode2, 1);
out_i2:
	ocfs2_rw_unlock(inode1, 1);
out_i1:
	unlock_two_nondirectories(s_inode, t_inode);
	return status;
}

/* Unlock both inodes and release buffers. */
void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
				 struct buffer_head *s_bh,
				 struct inode *t_inode,
				 struct buffer_head *t_bh)
{
	ocfs2_inode_unlock(s_inode, 1);
	ocfs2_rw_unlock(s_inode, 1);
	brelse(s_bh);
	if (s_inode != t_inode) {
		ocfs2_inode_unlock(t_inode, 1);
		ocfs2_rw_unlock(t_inode, 1);
		brelse(t_bh);
	}
	unlock_two_nondirectories(s_inode, t_inode);
}
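/*
 * Illustrative call pattern (a sketch, not code from this file): the
 * remap entry point in file.c is expected to pair these helpers
 * roughly as follows:
 *
 *	ocfs2_reflink_inodes_lock(s_inode, &s_bh, t_inode, &t_bh);
 *	copied = ocfs2_reflink_remap_blocks(s_inode, s_bh, pos_in,
 *					    t_inode, t_bh, pos_out, len);
 *	if (copied > 0)
 *		ocfs2_reflink_update_dest(t_inode, t_bh, pos_out + copied);
 *	ocfs2_reflink_inodes_unlock(s_inode, s_bh, t_inode, t_bh);
 */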