/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * move_extents.c
 *
 * Copyright (C) 2011 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/swap.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_ioctl.h"

#include "alloc.h"
#include "localalloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "uptodate.h"
#include "super.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "sysfile.h"
#include "refcounttree.h"
#include "move_extents.h"

struct ocfs2_move_extents_context {
	struct inode *inode;
	struct file *file;
	int auto_defrag;
	int partial;
	int credits;
	u32 new_phys_cpos;
	u32 clusters_moved;
	u64 refcount_loc;
	struct ocfs2_move_extents *range;
	struct ocfs2_extent_tree et;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};
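/*
 * Relocate the extent at (cpos, len) from physical cluster p_cpos to
 * new_p_cpos: copy the cluster contents page by page, replace the extent
 * record so it points at the new location (clearing the REFCOUNTED flag),
 * then release the old clusters through the refcount tree or the
 * truncate log.
 */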
static int __ocfs2_move_extent(handle_t *handle,
			       struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
			       int ext_flags)
{
	int ret = 0, index;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_rec *rec, replace_rec;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);

	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
					       p_cpos, new_p_cpos, len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	memset(&replace_rec, 0, sizeof(replace_rec));
	replace_rec.e_cpos = cpu_to_le32(cpos);
	replace_rec.e_leaf_clusters = cpu_to_le16(len);
	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
								   new_p_cpos));

	path = ocfs2_new_path_from_et(&context->et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1) {
		ret = ocfs2_error(inode->i_sb,
				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
				  (unsigned long long)ino, cpos);
		goto out;
	}

	rec = &el->l_recs[index];

	BUG_ON(ext_flags != rec->e_flags);
	/*
	 * After moving/defragging to the new location, the extent is no
	 * longer going to be refcounted.
	 */
	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
				      context->et.et_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_split_extent(handle, &context->et, path, index,
				 &replace_rec, context->meta_ac,
				 &context->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, context->et.et_root_bh);

	context->new_phys_cpos = new_p_cpos;

	/*
	 * Do we need to append to the truncate log for the old clusters?
	 */
	if (old_blkno) {
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(osb->sb,
								 old_blkno),
					len, context->meta_ac,
					&context->dealloc, 1);
		else
			ret = ocfs2_truncate_log_append(osb, handle,
							old_blkno, len);
	}

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
out:
	ocfs2_free_path(path);
	return ret;
}

/*
 * Lock the allocators and reserve an appropriate number of bits for
 * metadata blocks and data clusters.
 *
 * In some cases we don't need to reserve clusters; just pass a NULL
 * data_ac.
 */
static int ocfs2_lock_allocators_move_extents(struct inode *inode,
					struct ocfs2_extent_tree *et,
					u32 clusters_to_move,
					u32 extents_to_split,
					struct ocfs2_alloc_context **meta_ac,
					struct ocfs2_alloc_context **data_ac,
					int extra_blocks,
					int *credits)
{
	int ret, num_free_extents;
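	/*
	 * Worst case for 'max_recs_needed' below: every cluster moved may
	 * consume an extent record of its own, and each extent split can
	 * add two more records; e.g. moving 8 clusters across one split
	 * needs at most 2 * 1 + 8 = 10 free records before the tree has
	 * to grow.
	 */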
	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	num_free_extents = ocfs2_num_free_extents(et);
	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	if (!num_free_extents ||
	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);

	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (data_ac) {
		ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);

	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
	     extra_blocks, clusters_to_move, *credits);
out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}

/*
 * Use a single journal handle to guarantee data consistency in case a
 * crash happens anywhere.
 *
 * XXX: defrag can end up finishing only part of the extent as requested,
 * when not enough contiguous clusters can be found in the allocator.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 new_phys_cpos, new_len;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
	int need_free = 0;

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
		BUG_ON(!ocfs2_is_refcount_inode(inode));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							*len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
						 &context->meta_ac,
						 &context->data_ac,
						 extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Should we be using the allocation reservation strategy here?
	 *
	 * if (context->data_ac)
	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
	 */

	inode_lock(tl_inode);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock_mutex;
		}
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_mutex;
	}

	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
				     &new_phys_cpos, &new_len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Allowing a partial extent move has both pros and cons: it makes
	 * the whole defragmentation less likely to fail; on the other hand,
	 * it may leave the fs even more fragmented after the move.  Let
	 * userspace make a good decision here.
	 */
	if (new_len != *len) {
		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
		if (!partial) {
			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
			ret = -ENOSPC;
			need_free = 1;
			goto out_commit;
		}
	}

	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
	     phys_cpos, new_phys_cpos);

	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
				  new_phys_cpos, ext_flags);
	if (ret)
		mlog_errno(ret);

	if (partial && (new_len != *len))
		*len = new_len;

	/*
	 * Here we should write the new pages out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
	if (ret)
		mlog_errno(ret);

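	/*
	 * If we claimed clusters above but then refused a partial move,
	 * hand them straight back while the transaction is still open.
	 */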
out_commit:
	if (need_free && context->data_ac) {
		struct ocfs2_alloc_context *data_ac = context->data_ac;

		if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
					new_phys_cpos, new_len);
		else
			ocfs2_free_clusters(handle,
					data_ac->ac_inode,
					data_ac->ac_bh,
					ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
					new_len);
	}

	ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
	inode_unlock(tl_inode);

	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}

	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * Find the victim alloc group, where 'vict_blkno' fits.
 */
static int ocfs2_find_victim_alloc_group(struct inode *inode,
					 u64 vict_blkno,
					 int type, int slot,
					 int *vict_bit,
					 struct buffer_head **ret_bh)
{
	int ret, i, bits_per_unit = 0;
	u64 blkno;
	char namebuf[40];

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *rec;
	struct ocfs2_dinode *ac_dinode;
	struct ocfs2_group_desc *bg;

	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					 strlen(namebuf), &blkno);
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
	cl = &(ac_dinode->id2.i_chain);
	rec = &(cl->cl_recs[0]);

	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
		bits_per_unit = osb->s_clustersize_bits -
					inode->i_sb->s_blocksize_bits;
	/*
	 * 'vict_blkno' is out of the valid range.
	 */
	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
	    (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
				bits_per_unit))) {
		ret = -EINVAL;
		goto out;
	}

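	/*
	 * Walk each chain of the allocator, following bg_next_group in
	 * every group descriptor, until we find the group whose bit range
	 * covers vict_blkno.
	 */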
	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

		rec = &(cl->cl_recs[i]);
		if (!rec)
			continue;

		bg = NULL;

		do {
			if (!bg)
				blkno = le64_to_cpu(rec->c_blkno);
			else
				blkno = le64_to_cpu(bg->bg_next_group);

			if (gd_bh) {
				brelse(gd_bh);
				gd_bh = NULL;
			}

			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			bg = (struct ocfs2_group_desc *)gd_bh->b_data;

			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
						le16_to_cpu(bg->bg_bits))) {

				*ret_bh = gd_bh;
				*vict_bit = (vict_blkno - blkno) >>
							bits_per_unit;
				mlog(0, "found the victim group: #%llu, "
				     "total_bits: %u, vict_bit: %u\n",
				     blkno, le16_to_cpu(bg->bg_bits),
				     *vict_bit);
				goto out;
			}

		} while (le64_to_cpu(bg->bg_next_group));
	}

	ret = -EINVAL;
out:
	brelse(ac_bh);

	/*
	 * The caller has to release gd_bh properly.
	 */
	return ret;
}

/*
 * XXX: helper to validate and adjust the moving goal.
 */
static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
					       struct ocfs2_move_extents *range)
{
	int ret, goal_bit = 0;

	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *bg;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int c_to_b = 1 << (osb->s_clustersize_bits -
					inode->i_sb->s_blocksize_bits);

	/*
	 * Make the goal cluster-aligned.
	 */
	range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
						      range->me_goal);
	/*
	 * Validate that the goal sits within the global_bitmap, and return
	 * the victim group descriptor.
	 */
	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret)
		goto out;

	bg = (struct ocfs2_group_desc *)gd_bh->b_data;

	/*
	 * The moving goal is not allowed to start at a group descriptor
	 * block (block #0 of the group); compromise to the next cluster
	 * instead.
	 */
	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
		range->me_goal += c_to_b;

	/*
	 * The movement must not cross two groups.
	 */
	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
								range->me_len) {
		ret = -EINVAL;
		goto out;
	}
	/*
	 * More exact validations/adjustments will be performed later,
	 * during the moving operation for each extent range.
	 */
	mlog(0, "extents are ready to be moved to block #%llu\n",
	     range->me_goal);

out:
	brelse(gd_bh);

	return ret;
}

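/*
 * Probe the victim group's bitmap for a run of 'move_len' contiguous free
 * bits, scanning forward from *goal_bit.  If the scan drifts more than
 * 'max_hop' bits past the goal, *phys_cpos is zeroed to signal failure;
 * on success, *goal_bit and *phys_cpos identify the free run.
 */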
static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
				    int *goal_bit, u32 move_len, u32 max_hop,
				    u32 *phys_cpos)
{
	int i, used, last_free_bits = 0, base_bit = *goal_bit;
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
						 le64_to_cpu(gd->bg_blkno));

	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {

		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
		if (used) {
			/*
			 * We tried searching for the free chunk even by
			 * jumping a 'max_hop' distance, but still failed.
			 */
			if ((i - base_bit) > max_hop) {
				*phys_cpos = 0;
				break;
			}

			if (last_free_bits)
				last_free_bits = 0;

			continue;
		} else
			last_free_bits++;

		if (last_free_bits == move_len) {
			*goal_bit = i;
			*phys_cpos = base_cpos + i;
			break;
		}
	}

	mlog(0, "found phys_cpos: %u to fit the wanted move.\n", *phys_cpos);
}

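/*
 * Move the extent at (cpos, len) to the validated goal: find the victim
 * group around *new_phys_cpos, probe it for enough free bits, relocate
 * the data with __ocfs2_move_extent(), and mark the new clusters as
 * allocated in the global bitmap, all under one transaction.
 */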
static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
			     u32 len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *gb_inode = NULL;
	struct buffer_head *gb_bh = NULL;
	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
						    context->range->me_threshold);
	u64 phys_blkno, new_phys_blkno;

	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
		BUG_ON(!ocfs2_is_refcount_inode(inode));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
						 &context->meta_ac,
						 NULL, extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Need to count two extra credits, for the global_bitmap inode and
	 * the group descriptor.
	 */
	credits += OCFS2_INODE_UPDATE_CREDITS + 1;

	/*
	 * ocfs2_move_extent() didn't reserve any clusters in the
	 * lock_allocators() logic, so we still need to lock the
	 * global_bitmap.
	 */
	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
					       OCFS2_INVALID_SLOT);
	if (!gb_inode) {
		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
		ret = -EIO;
		goto out;
	}

	inode_lock(gb_inode);

	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_gb_mutex;
	}

	inode_lock(tl_inode);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_tl_inode;
	}

	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Probe the victim cluster group to find a proper region to fit
	 * the wanted movement; it will even perform a best-effort attempt
	 * by compromising to a threshold around the goal.
	 */
	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
				new_phys_cpos);
	if (!*new_phys_cpos) {
		ret = -ENOSPC;
		goto out_commit;
	}

	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
				  *new_phys_cpos, ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
					 goal_bit, len);
	if (ret) {
		ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
						   le16_to_cpu(gd->bg_chain));
		mlog_errno(ret);
	}

	/*
	 * Here we should write the new pages out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
	brelse(gd_bh);

out_unlock_tl_inode:
	inode_unlock(tl_inode);

	ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
	inode_unlock(gb_inode);
	brelse(gb_bh);
	iput(gb_inode);

out:
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * Helper to calculate the defragging length in one run according to the
 * threshold.
 */
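/*
 * For example, with a 256-cluster threshold, extents of 100, 100 and 200
 * clusters defrag as 100 + 100 plus the first 56 clusters of the third
 * extent; 'len_defraged' then resets and the remaining 144 clusters start
 * a new cycle.  A leading 300-cluster extent would be skipped outright.
 */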
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
					 u32 threshold, int *skip)
{
	if ((*alloc_size + *len_defraged) < threshold) {
		/*
		 * Proceed with defragmentation until we meet the threshold.
		 */
		*len_defraged += *alloc_size;
	} else if (*len_defraged == 0) {
		/*
		 * XXX: skip a large extent.
		 */
		*skip = 1;
	} else {
		/*
		 * Split this extent so that it coalesces with the former
		 * pieces to exactly reach the threshold.
		 *
		 * We're done here with one cycle of defragmentation of size
		 * 'threshold'; resetting 'len_defraged' forces a new cycle.
		 */
		*alloc_size = threshold - *len_defraged;
		*len_defraged = 0;
	}
}

static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
				      struct ocfs2_move_extents_context *context)
{
	int ret = 0, flags, do_defrag, skip = 0;
	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

	struct inode *inode = context->inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_move_extents *range = context->range;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((i_size_read(inode) == 0) || (range->me_len == 0))
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	/*
	 * TO-DO XXX:
	 *
	 * - xattr extents.
	 */

	do_defrag = context->auto_defrag;

	/*
	 * Extent moving happens in units of clusters; for the sake of
	 * simplicity, we may lose parts of the two clusters that
	 * 'me_start' and 'me_start + me_len' fall within.
	 */
	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
	len_to_move = (range->me_start + range->me_len) >>
						osb->s_clustersize_bits;
	if (len_to_move >= move_start)
		len_to_move -= move_start;
	else
		len_to_move = 0;

	if (do_defrag) {
		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
		if (defrag_thresh <= 1)
			goto done;
	} else
		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
							 range->me_goal);

	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
	     "thresh: %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)range->me_start,
	     (unsigned long long)range->me_len,
	     move_start, len_to_move, defrag_thresh);

	cpos = move_start;
	while (len_to_move) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
					 &flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (alloc_size > len_to_move)
			alloc_size = len_to_move;

		/*
		 * XXX: how to deal with a hole:
		 *
		 * - skip the hole of course
		 * - force a new defragmentation
		 */
		if (!phys_cpos) {
			if (do_defrag)
				len_defraged = 0;

			goto next;
		}

		if (do_defrag) {
			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
						     defrag_thresh, &skip);
			/*
			 * Skip large extents.
			 */
			if (skip) {
				skip = 0;
				goto next;
			}

			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
			     "alloc_size: %u, len_defraged: %u\n",
			     cpos, phys_cpos, alloc_size, len_defraged);

			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
						  &alloc_size, flags);
		} else {
			ret = ocfs2_move_extent(context, cpos, phys_cpos,
						&new_phys_cpos, alloc_size,
						flags);

			new_phys_cpos += alloc_size;
		}

		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		context->clusters_moved += alloc_size;
next:
		cpos += alloc_size;
		len_to_move -= alloc_size;
	}

done:
	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;

out:
	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
						      context->clusters_moved);
	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
						       context->new_phys_cpos);

	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &context->dealloc);

	return ret;
}

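/*
 * Take the rw/inode cluster locks and ip_alloc_sem, run the move/defrag
 * over the whole requested range, then update the inode's ctime in a
 * small follow-up transaction.
 */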
static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
{
	int status;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_dinode *di;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes from other nodes.
	 */
	status = ocfs2_rw_lock(inode, 1);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_inode_lock(inode, &di_bh, 1);
	if (status) {
		mlog_errno(status);
		goto out_rw_unlock;
	}

	/*
	 * Remember that ip_xattr_sem also needs to be held if necessary.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	status = __ocfs2_move_extents_range(di_bh, context);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (status) {
		mlog_errno(status);
		goto out_inode_unlock;
	}

	/*
	 * We update ctime for these changes.
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_inode_unlock;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	inode->i_ctime = current_time(inode);
	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);
out:
	inode_unlock(inode);

	return status;
}

int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
	int status;

	struct inode *inode = file_inode(filp);
	struct ocfs2_move_extents range;
	struct ocfs2_move_extents_context *context;

	if (!argp)
		return -EINVAL;

	status = mnt_want_write_file(filp);
	if (status)
		return status;

	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
		status = -EPERM;
		goto out_drop;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		status = -EPERM;
		goto out_drop;
	}

	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
	if (!context) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out_drop;
	}

	context->inode = inode;
	context->file = filp;

	if (copy_from_user(&range, argp, sizeof(range))) {
		status = -EFAULT;
		goto out_free;
	}

	if (range.me_start > i_size_read(inode)) {
		status = -EINVAL;
		goto out_free;
	}

	if (range.me_start + range.me_len > i_size_read(inode))
		range.me_len = i_size_read(inode) - range.me_start;

	context->range = &range;

	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
		context->auto_defrag = 1;
		/*
		 * OK, the default threshold for defragmentation is 1M,
		 * since our maximum cluster size is also 1M.  Any thoughts?
		 */
		if (!range.me_threshold)
			range.me_threshold = 1024 * 1024;

		if (range.me_threshold > i_size_read(inode))
			range.me_threshold = i_size_read(inode);

		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
			context->partial = 1;
	} else {
		/*
		 * First, a best-effort attempt to validate and adjust the
		 * goal (a physical block address); it can't guarantee that
		 * the later operation will always succeed, since the
		 * global_bitmap may change a bit over time.
		 */
		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
		if (status)
			goto out_copy;
	}

	status = ocfs2_move_extents(context);
	if (status)
		mlog_errno(status);
out_copy:
	/*
	 * The movement/defragmentation may end up being only partially
	 * completed; that's why we need to return the moved length and
	 * new_offset to userspace even if a failure happens somewhere.
	 */
	if (copy_to_user(argp, &range, sizeof(range)))
		status = -EFAULT;

out_free:
	kfree(context);
out_drop:
	mnt_drop_write_file(filp);

	return status;
}