/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits. But below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we have accumulated are not really high
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If we are not able to extend the journal, restart it
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	lb->first_pblock = 0;
	return retval;
}

static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;

	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock + 1 == pblock) &&
		(lb->last_block + 1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode's i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty after this, because we decrement
	 * i_blocks when freeing the indirect meta-data blocks
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta-data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta-data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * Don't migrate fast symlinks
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list.  We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set i_nlink to zero so the inode will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * Start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migrate.  New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and leave
		 * the rest of the work to orphan_list_cleanup()
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address, 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * In the failure case, delete the extent information
		 * of the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap inode data, free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as having size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_delete_inode() does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		if (len > EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = 0; i < len; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
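
/*
 * Usage sketch (illustrative only): ext4_ext_migrate() assumes the caller
 * holds the inode's i_mutex, which in practice is the EXT4_IOC_MIGRATE
 * ioctl path.  A minimal caller therefore looks roughly like:
 *
 *	mutex_lock(&inode->i_mutex);
 *	err = ext4_ext_migrate(inode);
 *	mutex_unlock(&inode->i_mutex);
 *
 * i_mutex keeps writes and truncate away while the extent tree is built in
 * tmp_inode; reads (and mmap faults) can still proceed, which is why the
 * final switch of i_data is done under i_data_sem in
 * ext4_ext_swap_inode_data() and why a racing block allocation makes the
 * migrate fail with -EAGAIN.
 */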