/*
 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
 *            Akira Fujita <a-fujita@rs.jp.nec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/fs.h>
#include <linux/quotaops.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "ext4.h"

/*
 * get_ext_path - look up the extent path for @block in @inode.
 *
 * Thin wrapper around ext4_ext_find_extent().  On failure @ret receives
 * the negative error code and @path is reset to NULL, so callers test
 * "path == NULL" after the call.  On success @ret is left untouched.
 * Multi-statement macro, hence the do { } while (0) wrapper.
 */
#define get_ext_path(path, inode, block, ret)		\
	do {								\
		path = ext4_ext_find_extent(inode, block, path);	\
		if (IS_ERR(path)) {					\
			ret = PTR_ERR(path);				\
			path = NULL;					\
		}							\
	} while (0)

/**
 * copy_extent_status - Copy the extent's initialization status
 *
 * @src:	an extent for getting initialize status
 * @dest:	an extent to be set the status
 *
 * If @src is uninitialized, mark @dest uninitialized too.  Otherwise
 * store @dest's own actual length back into ee_len — presumably to
 * clear any uninitialized-length bias on @dest, since
 * ext4_ext_get_actual_len() returns the length without that bias.
 */
static void
copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
{
	if (ext4_ext_is_uninitialized(src))
		ext4_ext_mark_uninitialized(dest);
	else
		dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
}

/**
 * mext_next_extent - Search for the next extent and set it to "extent"
 *
 * @inode:	inode which is searched
 * @path:	this will obtain data for the next extent
 * @extent:	pointer to the next extent we have just gotten
 *
 * Search the next extent in the array of ext4_ext_path structure (@path)
 * and set it to ext4_extent structure (@extent). In addition, the member of
 * @path (->p_ext) also points the next extent. Return 0 on success, 1 if
 * ext4_ext_path structure refers to the last extent, or a negative error
 * value on failure.
 */
static int
mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
		      struct ext4_extent **extent)
{
	int ppos, leaf_ppos = path->p_depth;

	ppos = leaf_ppos;
	/* More extents left in the current leaf: just advance p_ext. */
	if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
		/* leaf block */
		*extent = ++path[ppos].p_ext;
		return 0;
	}

	/*
	 * The leaf is exhausted: climb towards the root until we find an
	 * index level that still has a sibling index entry to the right.
	 */
	while (--ppos >= 0) {
		if (EXT_LAST_INDEX(path[ppos].p_hdr) >
		    path[ppos].p_idx) {
			int cur_ppos = ppos;

			/* index block: step to the next index entry and
			 * read the block it points at. */
			path[ppos].p_idx++;
			path[ppos].p_block = idx_pblock(path[ppos].p_idx);
			if (path[ppos+1].p_bh)
				brelse(path[ppos+1].p_bh);
			/*
			 * On sb_bread() failure p_bh is left NULL, so a
			 * later release of the path will not touch a
			 * stale buffer head.
			 */
			path[ppos+1].p_bh =
				sb_bread(inode->i_sb, path[ppos].p_block);
			if (!path[ppos+1].p_bh)
				return -EIO;
			path[ppos+1].p_hdr =
				ext_block_hdr(path[ppos+1].p_bh);

			/* Halfway index block: descend again, loading the
			 * first index entry of every intermediate level
			 * down to the leaf. */
			while (++cur_ppos < leaf_ppos) {
				path[cur_ppos].p_idx =
					EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
				path[cur_ppos].p_block =
					idx_pblock(path[cur_ppos].p_idx);
				if (path[cur_ppos+1].p_bh)
					brelse(path[cur_ppos+1].p_bh);
				path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
					path[cur_ppos].p_block);
				if (!path[cur_ppos+1].p_bh)
					return -EIO;
				path[cur_ppos+1].p_hdr =
					ext_block_hdr(path[cur_ppos+1].p_bh);
			}

			/* leaf block: return the first extent of the newly
			 * loaded leaf. */
			path[leaf_ppos].p_ext = *extent =
				EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
			return 0;
		}
	}
	/* We found the last extent */
	return 1;
}

/**
 * mext_double_down_read - Acquire two inodes' read semaphore
 *
 * @orig_inode:		original inode structure
 * @donor_inode:	donor inode structure
 * Acquire read semaphore of the two inodes (orig and donor) by i_ino order.
121 */ 122 static void 123 mext_double_down_read(struct inode *orig_inode, struct inode *donor_inode) 124 { 125 struct inode *first = orig_inode, *second = donor_inode; 126 127 BUG_ON(orig_inode == NULL || donor_inode == NULL); 128 129 /* 130 * Use the inode number to provide the stable locking order instead 131 * of its address, because the C language doesn't guarantee you can 132 * compare pointers that don't come from the same array. 133 */ 134 if (donor_inode->i_ino < orig_inode->i_ino) { 135 first = donor_inode; 136 second = orig_inode; 137 } 138 139 down_read(&EXT4_I(first)->i_data_sem); 140 down_read(&EXT4_I(second)->i_data_sem); 141 } 142 143 /** 144 * mext_double_down_write - Acquire two inodes' write semaphore 145 * 146 * @orig_inode: original inode structure 147 * @donor_inode: donor inode structure 148 * Acquire write semaphore of the two inodes (orig and donor) by i_ino order. 149 */ 150 static void 151 mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode) 152 { 153 struct inode *first = orig_inode, *second = donor_inode; 154 155 BUG_ON(orig_inode == NULL || donor_inode == NULL); 156 157 /* 158 * Use the inode number to provide the stable locking order instead 159 * of its address, because the C language doesn't guarantee you can 160 * compare pointers that don't come from the same array. 161 */ 162 if (donor_inode->i_ino < orig_inode->i_ino) { 163 first = donor_inode; 164 second = orig_inode; 165 } 166 167 down_write(&EXT4_I(first)->i_data_sem); 168 down_write(&EXT4_I(second)->i_data_sem); 169 } 170 171 /** 172 * mext_double_up_read - Release two inodes' read semaphore 173 * 174 * @orig_inode: original inode structure to be released its lock first 175 * @donor_inode: donor inode structure to be released its lock second 176 * Release read semaphore of two inodes (orig and donor). 
177 */ 178 static void 179 mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode) 180 { 181 BUG_ON(orig_inode == NULL || donor_inode == NULL); 182 183 up_read(&EXT4_I(orig_inode)->i_data_sem); 184 up_read(&EXT4_I(donor_inode)->i_data_sem); 185 } 186 187 /** 188 * mext_double_up_write - Release two inodes' write semaphore 189 * 190 * @orig_inode: original inode structure to be released its lock first 191 * @donor_inode: donor inode structure to be released its lock second 192 * Release write semaphore of two inodes (orig and donor). 193 */ 194 static void 195 mext_double_up_write(struct inode *orig_inode, struct inode *donor_inode) 196 { 197 BUG_ON(orig_inode == NULL || donor_inode == NULL); 198 199 up_write(&EXT4_I(orig_inode)->i_data_sem); 200 up_write(&EXT4_I(donor_inode)->i_data_sem); 201 } 202 203 /** 204 * mext_insert_across_blocks - Insert extents across leaf block 205 * 206 * @handle: journal handle 207 * @orig_inode: original inode 208 * @o_start: first original extent to be changed 209 * @o_end: last original extent to be changed 210 * @start_ext: first new extent to be inserted 211 * @new_ext: middle of new extent to be inserted 212 * @end_ext: last new extent to be inserted 213 * 214 * Allocate a new leaf block and insert extents into it. Return 0 on success, 215 * or a negative error value on failure. 
216 */ 217 static int 218 mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, 219 struct ext4_extent *o_start, struct ext4_extent *o_end, 220 struct ext4_extent *start_ext, struct ext4_extent *new_ext, 221 struct ext4_extent *end_ext) 222 { 223 struct ext4_ext_path *orig_path = NULL; 224 ext4_lblk_t eblock = 0; 225 int new_flag = 0; 226 int end_flag = 0; 227 int err = 0; 228 229 if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) { 230 if (o_start == o_end) { 231 232 /* start_ext new_ext end_ext 233 * donor |---------|-----------|--------| 234 * orig |------------------------------| 235 */ 236 end_flag = 1; 237 } else { 238 239 /* start_ext new_ext end_ext 240 * donor |---------|----------|---------| 241 * orig |---------------|--------------| 242 */ 243 o_end->ee_block = end_ext->ee_block; 244 o_end->ee_len = end_ext->ee_len; 245 ext4_ext_store_pblock(o_end, ext_pblock(end_ext)); 246 } 247 248 o_start->ee_len = start_ext->ee_len; 249 new_flag = 1; 250 251 } else if (start_ext->ee_len && new_ext->ee_len && 252 !end_ext->ee_len && o_start == o_end) { 253 254 /* start_ext new_ext 255 * donor |--------------|---------------| 256 * orig |------------------------------| 257 */ 258 o_start->ee_len = start_ext->ee_len; 259 new_flag = 1; 260 261 } else if (!start_ext->ee_len && new_ext->ee_len && 262 end_ext->ee_len && o_start == o_end) { 263 264 /* new_ext end_ext 265 * donor |--------------|---------------| 266 * orig |------------------------------| 267 */ 268 o_end->ee_block = end_ext->ee_block; 269 o_end->ee_len = end_ext->ee_len; 270 ext4_ext_store_pblock(o_end, ext_pblock(end_ext)); 271 272 /* 273 * Set 0 to the extent block if new_ext was 274 * the first block. 
275 */ 276 if (new_ext->ee_block) 277 eblock = le32_to_cpu(new_ext->ee_block); 278 279 new_flag = 1; 280 } else { 281 ext4_debug("ext4 move extent: Unexpected insert case\n"); 282 return -EIO; 283 } 284 285 if (new_flag) { 286 get_ext_path(orig_path, orig_inode, eblock, err); 287 if (orig_path == NULL) 288 goto out; 289 290 if (ext4_ext_insert_extent(handle, orig_inode, 291 orig_path, new_ext)) 292 goto out; 293 } 294 295 if (end_flag) { 296 get_ext_path(orig_path, orig_inode, 297 le32_to_cpu(end_ext->ee_block) - 1, err); 298 if (orig_path == NULL) 299 goto out; 300 301 if (ext4_ext_insert_extent(handle, orig_inode, 302 orig_path, end_ext)) 303 goto out; 304 } 305 out: 306 if (orig_path) { 307 ext4_ext_drop_refs(orig_path); 308 kfree(orig_path); 309 } 310 311 return err; 312 313 } 314 315 /** 316 * mext_insert_inside_block - Insert new extent to the extent block 317 * 318 * @o_start: first original extent to be moved 319 * @o_end: last original extent to be moved 320 * @start_ext: first new extent to be inserted 321 * @new_ext: middle of new extent to be inserted 322 * @end_ext: last new extent to be inserted 323 * @eh: extent header of target leaf block 324 * @range_to_move: used to decide how to insert extent 325 * 326 * Insert extents into the leaf block. The extent (@o_start) is overwritten 327 * by inserted extents. 
 */
static void
mext_insert_inside_block(struct ext4_extent *o_start,
			      struct ext4_extent *o_end,
			      struct ext4_extent *start_ext,
			      struct ext4_extent *new_ext,
			      struct ext4_extent *end_ext,
			      struct ext4_extent_header *eh,
			      int range_to_move)
{
	int i = 0;
	unsigned long len;

	/* Move the existing extents: shift everything after o_end right
	 * by range_to_move slots.  len is a byte count, and the regions
	 * may overlap, hence memmove rather than memcpy. */
	if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) {
		len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) -
			(unsigned long)(o_end + 1);
		memmove(o_end + 1 + range_to_move, o_end + 1, len);
	}

	/* Insert start entry: only its length changes, block/pblock of
	 * o_start are kept. */
	if (start_ext->ee_len)
		o_start[i++].ee_len = start_ext->ee_len;

	/* Insert new entry */
	if (new_ext->ee_len) {
		o_start[i] = *new_ext;
		ext4_ext_store_pblock(&o_start[i++], ext_pblock(new_ext));
	}

	/* Insert end entry */
	if (end_ext->ee_len)
		o_start[i] = *end_ext;

	/* Increment the total entries counter on the extent block.
	 * range_to_move may be negative when the new extents occupy
	 * fewer slots than the originals. */
	le16_add_cpu(&eh->eh_entries, range_to_move);
}

/**
 * mext_insert_extents - Insert new extent
 *
 * @handle:	journal handle
 * @orig_inode:	original inode
 * @orig_path:	path indicates first extent to be changed
 * @o_start:	first original extent to be changed
 * @o_end:	last original extent to be changed
 * @start_ext:	first new extent to be inserted
 * @new_ext:	middle of new extent to be inserted
 * @end_ext:	last new extent to be inserted
 *
 * Call the function to insert extents. If we cannot add more extents into
 * the leaf block, we call mext_insert_across_blocks() to create a
 * new leaf block. Otherwise call mext_insert_inside_block(). Return 0
 * on success, or a negative error value on failure.
 */
static int
mext_insert_extents(handle_t *handle, struct inode *orig_inode,
		struct ext4_ext_path *orig_path,
		struct ext4_extent *o_start,
		struct ext4_extent *o_end,
		struct ext4_extent *start_ext,
		struct ext4_extent *new_ext,
		struct ext4_extent *end_ext)
{
	struct ext4_extent_header *eh;
	unsigned long need_slots, slots_range;
	int range_to_move, depth, ret;

	/*
	 * The extents need to be inserted
	 * start_extent + new_extent + end_extent.
	 * Only extents with a non-zero length occupy a slot.
	 */
	need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) +
		(new_ext->ee_len ? 1 : 0);

	/* The number of slots between start and end.
	 * NOTE(review): the "+ 1" is swallowed by the integer division
	 * since the pointer difference is a multiple of the extent size. */
	slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1)
		/ sizeof(struct ext4_extent);

	/* Range to move the end of extent */
	range_to_move = need_slots - slots_range;
	depth = orig_path->p_depth;
	/* Advance to the leaf level of the path. */
	orig_path += depth;
	eh = orig_path->p_hdr;

	if (depth) {
		/* Register to journal: the leaf lives in its own buffer
		 * only when the tree has depth > 0; at depth 0 the extents
		 * are inline in the inode. */
		ret = ext4_journal_get_write_access(handle, orig_path->p_bh);
		if (ret)
			return ret;
	}

	/* Expansion: not enough free slots in this leaf, so grow the
	 * tree via mext_insert_across_blocks(). */
	if (range_to_move > 0 &&
		(range_to_move > le16_to_cpu(eh->eh_max)
			- le16_to_cpu(eh->eh_entries))) {

		ret = mext_insert_across_blocks(handle, orig_inode, o_start,
					o_end, start_ext, new_ext, end_ext);
		if (ret < 0)
			return ret;
	} else
		mext_insert_inside_block(o_start, o_end, start_ext, new_ext,
						end_ext, eh, range_to_move);

	/* Dirty either the leaf buffer or the inode itself, matching the
	 * write-access choice above. */
	if (depth) {
		ret = ext4_handle_dirty_metadata(handle, orig_inode,
						 orig_path->p_bh);
		if (ret)
			return ret;
	} else {
		ret = ext4_mark_inode_dirty(handle, orig_inode);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * mext_leaf_block - Move one leaf extent block into the inode.
 *
 * @handle:		journal handle
 * @orig_inode:		original inode
 * @orig_path:		path indicates first extent to be changed
 * @dext:		donor extent
 * @from:		start offset on the target file
 *
 * In order to insert extents into the leaf block, we must divide the extent
 * in the leaf block into three extents. The one is located to be inserted
 * extents, and the others are located around it.
 *
 * Therefore, this function creates structures to save extents of the leaf
 * block, and inserts extents by calling mext_insert_extents() with
 * created extents. Return 0 on success, or a negative error value on failure.
 */
static int
mext_leaf_block(handle_t *handle, struct inode *orig_inode,
		     struct ext4_ext_path *orig_path, struct ext4_extent *dext,
		     ext4_lblk_t *from)
{
	struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
	struct ext4_extent new_ext, start_ext, end_ext;
	ext4_lblk_t new_ext_end;
	ext4_fsblk_t new_phys_end;
	int oext_alen, new_ext_alen, end_ext_alen;
	int depth = ext_depth(orig_inode);
	int ret;

	o_start = o_end = oext = orig_path[depth].p_ext;
	oext_alen = ext4_ext_get_actual_len(oext);
	start_ext.ee_len = end_ext.ee_len = 0;

	/* new_ext: the donor data relocated to logical offset *from. */
	new_ext.ee_block = cpu_to_le32(*from);
	ext4_ext_store_pblock(&new_ext, ext_pblock(dext));
	new_ext.ee_len = dext->ee_len;
	new_ext_alen = ext4_ext_get_actual_len(&new_ext);
	new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
	/* NOTE(review): new_phys_end is computed but never used below. */
	new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;

	/*
	 * Case: original extent is first
	 * oext      |--------|
	 * new_ext      |--|
	 * start_ext |--|
	 */
	if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) &&
		le32_to_cpu(new_ext.ee_block) <
		le32_to_cpu(oext->ee_block) + oext_alen) {
		start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
					       le32_to_cpu(oext->ee_block));
		copy_extent_status(oext, &start_ext);
	} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
		prev_ext = oext - 1;
		/*
		 * We can merge new_ext into previous extent,
		 * if these are contiguous and same extent type.
		 */
		if (ext4_can_extents_be_merged(orig_inode, prev_ext,
					       &new_ext)) {
			o_start = prev_ext;
			start_ext.ee_len = cpu_to_le16(
				ext4_ext_get_actual_len(prev_ext) +
				new_ext_alen);
			copy_extent_status(prev_ext, &start_ext);
			/* new_ext is absorbed into start_ext. */
			new_ext.ee_len = 0;
		}
	}

	/*
	 * Case: new_ext_end must be less than oext
	 * oext      |-----------|
	 * new_ext       |-------|
	 */
	BUG_ON(le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end);

	/*
	 * Case: new_ext is smaller than original extent
	 * oext    |---------------|
	 * new_ext |-----------|
	 * end_ext             |---|
	 */
	if (le32_to_cpu(oext->ee_block) <= new_ext_end &&
		new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) {
		end_ext.ee_len =
			cpu_to_le16(le32_to_cpu(oext->ee_block) +
				    oext_alen - 1 - new_ext_end);
		copy_extent_status(oext, &end_ext);
		end_ext_alen = ext4_ext_get_actual_len(&end_ext);
		/* end_ext covers the tail of the original extent. */
		ext4_ext_store_pblock(&end_ext,
			(ext_pblock(o_end) + oext_alen - end_ext_alen));
		end_ext.ee_block =
			cpu_to_le32(le32_to_cpu(o_end->ee_block) +
				    oext_alen - end_ext_alen);
	}

	ret = mext_insert_extents(handle, orig_inode, orig_path, o_start,
				  o_end, &start_ext, &new_ext, &end_ext);
	return ret;
}

/**
 * mext_calc_swap_extents - Calculate extents for extent swapping.
 *
 * @tmp_dext:	the extent that will belong to the original inode
 * @tmp_oext:	the extent that will belong to the donor inode
 * @orig_off:	block offset of original inode
 * @donor_off:	block offset of donor inode
 * @max_count:	the maximum length of extents
 */
static void
mext_calc_swap_extents(struct ext4_extent *tmp_dext,
		       struct ext4_extent *tmp_oext,
		       ext4_lblk_t orig_off, ext4_lblk_t donor_off,
		       ext4_lblk_t max_count)
{
	ext4_lblk_t diff, orig_diff;
	struct ext4_extent dext_old, oext_old;

	/* Save the originals so each side's init status can be copied
	 * onto the other at the end. */
	dext_old = *tmp_dext;
	oext_old = *tmp_oext;

	/* When tmp_dext is too large, pick up the target range:
	 * advance its start (logical and physical) to donor_off. */
	diff = donor_off - le32_to_cpu(tmp_dext->ee_block);

	ext4_ext_store_pblock(tmp_dext, ext_pblock(tmp_dext) + diff);
	tmp_dext->ee_block =
			cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff);
	/* NOTE(review): this subtracts diff from the raw ee_len, which for
	 * an uninitialized extent includes the uninit bias — presumably
	 * the bias survives while actual length > diff; confirm. */
	tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff);

	/* Cap the donor side at the requested count. */
	if (max_count < ext4_ext_get_actual_len(tmp_dext))
		tmp_dext->ee_len = cpu_to_le16(max_count);

	orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
	ext4_ext_store_pblock(tmp_oext, ext_pblock(tmp_oext) + orig_diff);

	/* Adjust extent length if donor extent is larger than orig */
	if (ext4_ext_get_actual_len(tmp_dext) >
	    ext4_ext_get_actual_len(tmp_oext) - orig_diff)
		tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) -
						orig_diff);

	/* Both sides end up the same length. */
	tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext));

	/* Swap the initialization status between the two extents. */
	copy_extent_status(&oext_old, tmp_dext);
	copy_extent_status(&dext_old, tmp_oext);
}

/**
 * mext_replace_branches - Replace original extents with new extents
 *
 * @handle:		journal handle
 * @orig_inode:		original inode
 * @donor_inode:	donor inode
 * @from:		block offset of orig_inode
 * @count:		block count to be replaced
 *
 * Replace original inode extents and donor inode extents page by page.
 * We implement this replacement in the following three steps:
 * 1. Save the block information of original and donor inodes into
 *    dummy extents.
 * 2. Change the block information of original inode to point at the
 *    donor inode blocks.
 * 3. Change the block information of donor inode to point at the saved
 *    original inode blocks in the dummy extents.
 *
 * Return 0 on success, or a negative error value on failure.
 */
static int
mext_replace_branches(handle_t *handle, struct inode *orig_inode,
			   struct inode *donor_inode, ext4_lblk_t from,
			   ext4_lblk_t count)
{
	struct ext4_ext_path *orig_path = NULL;
	struct ext4_ext_path *donor_path = NULL;
	struct ext4_extent *oext, *dext;
	struct ext4_extent tmp_dext, tmp_oext;
	ext4_lblk_t orig_off = from, donor_off = from;
	int err = 0;
	int depth;
	int replaced_count = 0;
	int dext_alen;

	/* Both inodes' i_data_sem held for write across the whole swap. */
	mext_double_down_write(orig_inode, donor_inode);

	/* Get the original extent for the block "orig_off" */
	get_ext_path(orig_path, orig_inode, orig_off, err);
	if (orig_path == NULL)
		goto out;

	/* Get the donor extent for the head */
	get_ext_path(donor_path, donor_inode, donor_off, err);
	if (donor_path == NULL)
		goto out;
	depth = ext_depth(orig_inode);
	oext = orig_path[depth].p_ext;
	tmp_oext = *oext;

	depth = ext_depth(donor_inode);
	dext = donor_path[depth].p_ext;
	tmp_dext = *dext;

	/* Trim both dummy extents to the overlapping [from, from+count)
	 * window before the first swap. */
	mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
			      donor_off, count);

	/* Loop for the donor extents */
	while (1) {
		/* The extent for donor must be found. */
		BUG_ON(!dext || donor_off != le32_to_cpu(tmp_dext.ee_block));

		/* Set donor extent to orig extent */
		err = mext_leaf_block(handle, orig_inode,
					   orig_path, &tmp_dext, &orig_off);
		if (err < 0)
			goto out;

		/* Set orig extent to donor extent */
		err = mext_leaf_block(handle, donor_inode,
					   donor_path, &tmp_oext, &donor_off);
		if (err < 0)
			goto out;

		dext_alen = ext4_ext_get_actual_len(&tmp_dext);
		replaced_count += dext_alen;
		donor_off += dext_alen;
		orig_off += dext_alen;

		/* Already moved the expected blocks */
		if (replaced_count >= count)
			break;

		/* Re-lookup both paths at the new offsets; the previous
		 * references are dropped first. */
		if (orig_path)
			ext4_ext_drop_refs(orig_path);
		get_ext_path(orig_path, orig_inode, orig_off, err);
		if (orig_path == NULL)
			goto out;
		depth = ext_depth(orig_inode);
		oext = orig_path[depth].p_ext;
		/* A hole at orig_off ends the replacement early with
		 * success (err = 0). */
		if (le32_to_cpu(oext->ee_block) +
				ext4_ext_get_actual_len(oext) <= orig_off) {
			err = 0;
			goto out;
		}
		tmp_oext = *oext;

		if (donor_path)
			ext4_ext_drop_refs(donor_path);
		get_ext_path(donor_path, donor_inode,
				      donor_off, err);
		if (donor_path == NULL)
			goto out;
		depth = ext_depth(donor_inode);
		dext = donor_path[depth].p_ext;
		if (le32_to_cpu(dext->ee_block) +
				ext4_ext_get_actual_len(dext) <= donor_off) {
			err = 0;
			goto out;
		}
		tmp_dext = *dext;

		mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
				      donor_off,
				      count - replaced_count);
	}

out:
	if (orig_path) {
		ext4_ext_drop_refs(orig_path);
		kfree(orig_path);
	}
	if (donor_path) {
		ext4_ext_drop_refs(donor_path);
		kfree(donor_path);
	}

	mext_double_up_write(orig_inode, donor_inode);
	return err;
}

/**
 * move_extent_per_page - Move extent data per page
 *
 * @o_filp:			file structure of original file
 * @donor_inode:		donor inode
 * @orig_page_offset:		page index on original file
 * @data_offset_in_page:	block index
 *				where data swapping starts
 * @block_len_in_page:		the number of blocks to be swapped
 * @uninit:			orig extent is uninitialized or not
 *
 * Save the data in original inode blocks and replace original inode extents
 * with donor inode extents by calling mext_replace_branches().
 * Finally, write out the saved data in new original inode blocks. Return 0
 * on success, or a negative error value on failure.
 */
static int
move_extent_par_page(struct file *o_filp, struct inode *donor_inode,
		  pgoff_t orig_page_offset, int data_offset_in_page,
		  int block_len_in_page, int uninit)
{
	struct inode *orig_inode = o_filp->f_dentry->d_inode;
	struct address_space *mapping = orig_inode->i_mapping;
	struct buffer_head *bh;
	struct page *page = NULL;
	const struct address_space_operations *a_ops = mapping->a_ops;
	handle_t *handle;
	ext4_lblk_t orig_blk_offset;
	long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
	unsigned int w_flags = 0;
	unsigned int tmp_data_len, data_len;
	void *fsdata;
	int ret, i, jblocks;
	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;

	/*
	 * It needs twice the amount of ordinary journal buffers because
	 * inode and donor_inode may change each different metadata blocks.
	 */
	jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
	handle = ext4_journal_start(orig_inode, jblocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		return ret;
	}

	if (segment_eq(get_fs(), KERNEL_DS))
		w_flags |= AOP_FLAG_UNINTERRUPTIBLE;

	/* First block (file-relative) covered by this swap. */
	orig_blk_offset = orig_page_offset * blocks_per_page +
		data_offset_in_page;

	/*
	 * If orig extent is uninitialized one,
	 * it's not necessary force the page into memory
	 * and then force it to be written out again.
	 * Just swap data blocks between orig and donor.
	 */
	if (uninit) {
		ret = mext_replace_branches(handle, orig_inode,
						donor_inode, orig_blk_offset,
						block_len_in_page);

		/* Clear the inode cache not to refer to the old data */
		ext4_ext_invalidate_cache(orig_inode);
		ext4_ext_invalidate_cache(donor_inode);
		goto out2;
	}

	offs = (long long)orig_blk_offset << orig_inode->i_blkbits;

	/* Calculate data_len */
	if ((orig_blk_offset + block_len_in_page - 1) ==
	    ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
		/* Replace the last block: only the bytes up to i_size
		 * within the final block count as data. */
		tmp_data_len = orig_inode->i_size & (blocksize - 1);
		/*
		 * If data_len equal zero, it shows data_len is multiples of
		 * blocksize. So we set appropriate value.
		 */
		if (tmp_data_len == 0)
			tmp_data_len = blocksize;

		data_len = tmp_data_len +
			((block_len_in_page - 1) << orig_inode->i_blkbits);
	} else {
		data_len = block_len_in_page << orig_inode->i_blkbits;
	}

	/* Pin the page and prepare it for writing; holds the page lock
	 * on success. */
	ret = a_ops->write_begin(o_filp, mapping, offs, data_len, w_flags,
				 &page, &fsdata);
	if (unlikely(ret < 0))
		goto out;

	if (!PageUptodate(page)) {
		/* NOTE(review): the readpage() return value is ignored and
		 * PageUptodate is not re-checked after lock_page() —
		 * confirm a read error cannot slip through here. */
		mapping->a_ops->readpage(o_filp, page);
		lock_page(page);
	}

	/*
	 * try_to_release_page() doesn't call releasepage in writeback mode.
	 * We should care about the order of writing to the same file
	 * by multiple move extent processes.
	 * It needs to call wait_on_page_writeback() to wait for the
	 * writeback of the page.
	 */
	if (PageWriteback(page))
		wait_on_page_writeback(page);

	/* Release old bh and drop refs */
	try_to_release_page(page, 0);

	ret = mext_replace_branches(handle, orig_inode, donor_inode,
				    orig_blk_offset, block_len_in_page);
	if (ret < 0)
		goto out;

	/* Clear the inode cache not to refer to the old data */
	ext4_ext_invalidate_cache(orig_inode);
	ext4_ext_invalidate_cache(donor_inode);

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0);

	/* Skip to the buffer head at data_offset_in_page. */
	bh = page_buffers(page);
	for (i = 0; i < data_offset_in_page; i++)
		bh = bh->b_this_page;

	/* Re-map each swapped block so the buffers point at the new
	 * (donor) physical blocks. */
	for (i = 0; i < block_len_in_page; i++) {
		ret = ext4_get_block(orig_inode,
				(sector_t)(orig_blk_offset + i), bh, 0);
		if (ret < 0)
			goto out;

		if (bh->b_this_page != NULL)
			bh = bh->b_this_page;
	}

	/* Dirty the page so the saved data is written to the new blocks;
	 * write_end also unlocks and releases the page. */
	ret = a_ops->write_end(o_filp, mapping, offs, data_len, data_len,
			       page, fsdata);
	page = NULL;

out:
	if (unlikely(page)) {
		if (PageLocked(page))
			unlock_page(page);
		page_cache_release(page);
	}
out2:
	ext4_journal_stop(handle);

	return ret < 0 ? ret : 0;
}

/**
 * mext_check_arguments - Check whether move extent can be done
 *
 * @orig_inode:		original inode
 * @donor_inode:	donor inode
 * @orig_start:		logical start offset in block for orig
 * @donor_start:	logical start offset in block for donor
 * @len:		the number of blocks to be moved
 * @moved_len:		moved block length
 *
 * Check the arguments of ext4_move_extents() whether the files can be
 * exchanged with each other.
 * Return 0 on success, or a negative error value on failure.
894 */ 895 static int 896 mext_check_arguments(struct inode *orig_inode, 897 struct inode *donor_inode, __u64 orig_start, 898 __u64 donor_start, __u64 *len, __u64 moved_len) 899 { 900 /* Regular file check */ 901 if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { 902 ext4_debug("ext4 move extent: The argument files should be " 903 "regular file [ino:orig %lu, donor %lu]\n", 904 orig_inode->i_ino, donor_inode->i_ino); 905 return -EINVAL; 906 } 907 908 /* Ext4 move extent does not support swapfile */ 909 if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { 910 ext4_debug("ext4 move extent: The argument files should " 911 "not be swapfile [ino:orig %lu, donor %lu]\n", 912 orig_inode->i_ino, donor_inode->i_ino); 913 return -EINVAL; 914 } 915 916 /* Files should be in the same ext4 FS */ 917 if (orig_inode->i_sb != donor_inode->i_sb) { 918 ext4_debug("ext4 move extent: The argument files " 919 "should be in same FS [ino:orig %lu, donor %lu]\n", 920 orig_inode->i_ino, donor_inode->i_ino); 921 return -EINVAL; 922 } 923 924 /* orig and donor should be different file */ 925 if (orig_inode->i_ino == donor_inode->i_ino) { 926 ext4_debug("ext4 move extent: The argument files should not " 927 "be same file [ino:orig %lu, donor %lu]\n", 928 orig_inode->i_ino, donor_inode->i_ino); 929 return -EINVAL; 930 } 931 932 /* Ext4 move extent supports only extent based file */ 933 if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) { 934 ext4_debug("ext4 move extent: orig file is not extents " 935 "based file [ino:orig %lu]\n", orig_inode->i_ino); 936 return -EOPNOTSUPP; 937 } else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) { 938 ext4_debug("ext4 move extent: donor file is not extents " 939 "based file [ino:donor %lu]\n", donor_inode->i_ino); 940 return -EOPNOTSUPP; 941 } 942 943 if ((!orig_inode->i_size) || (!donor_inode->i_size)) { 944 ext4_debug("ext4 move extent: File size is 0 byte\n"); 945 return -EINVAL; 946 } 947 948 /* Start offset should be 
same */ 949 if (orig_start != donor_start) { 950 ext4_debug("ext4 move extent: orig and donor's start " 951 "offset are not same [ino:orig %lu, donor %lu]\n", 952 orig_inode->i_ino, donor_inode->i_ino); 953 return -EINVAL; 954 } 955 956 if (moved_len) { 957 ext4_debug("ext4 move extent: moved_len should be 0 " 958 "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, 959 donor_inode->i_ino); 960 return -EINVAL; 961 } 962 963 if ((orig_start > MAX_DEFRAG_SIZE) || 964 (donor_start > MAX_DEFRAG_SIZE) || 965 (*len > MAX_DEFRAG_SIZE) || 966 (orig_start + *len > MAX_DEFRAG_SIZE)) { 967 ext4_debug("ext4 move extent: Can't handle over [%lu] blocks " 968 "[ino:orig %lu, donor %lu]\n", MAX_DEFRAG_SIZE, 969 orig_inode->i_ino, donor_inode->i_ino); 970 return -EINVAL; 971 } 972 973 if (orig_inode->i_size > donor_inode->i_size) { 974 if (orig_start >= donor_inode->i_size) { 975 ext4_debug("ext4 move extent: orig start offset " 976 "[%llu] should be less than donor file size " 977 "[%lld] [ino:orig %lu, donor_inode %lu]\n", 978 orig_start, donor_inode->i_size, 979 orig_inode->i_ino, donor_inode->i_ino); 980 return -EINVAL; 981 } 982 983 if (orig_start + *len > donor_inode->i_size) { 984 ext4_debug("ext4 move extent: End offset [%llu] should " 985 "be less than donor file size [%lld]." 986 "So adjust length from %llu to %lld " 987 "[ino:orig %lu, donor %lu]\n", 988 orig_start + *len, donor_inode->i_size, 989 *len, donor_inode->i_size - orig_start, 990 orig_inode->i_ino, donor_inode->i_ino); 991 *len = donor_inode->i_size - orig_start; 992 } 993 } else { 994 if (orig_start >= orig_inode->i_size) { 995 ext4_debug("ext4 move extent: start offset [%llu] " 996 "should be less than original file size " 997 "[%lld] [inode:orig %lu, donor %lu]\n", 998 orig_start, orig_inode->i_size, 999 orig_inode->i_ino, donor_inode->i_ino); 1000 return -EINVAL; 1001 } 1002 1003 if (orig_start + *len > orig_inode->i_size) { 1004 ext4_debug("ext4 move extent: Adjust length " 1005 "from %llu to %lld. 
Because it should be " 1006 "less than original file size " 1007 "[ino:orig %lu, donor %lu]\n", 1008 *len, orig_inode->i_size - orig_start, 1009 orig_inode->i_ino, donor_inode->i_ino); 1010 *len = orig_inode->i_size - orig_start; 1011 } 1012 } 1013 1014 if (!*len) { 1015 ext4_debug("ext4 move extent: len shoudld not be 0 " 1016 "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, 1017 donor_inode->i_ino); 1018 return -EINVAL; 1019 } 1020 1021 return 0; 1022 } 1023 1024 /** 1025 * mext_inode_double_lock - Lock i_mutex on both @inode1 and @inode2 1026 * 1027 * @inode1: the inode structure 1028 * @inode2: the inode structure 1029 * 1030 * Lock two inodes' i_mutex by i_ino order. This function is moved from 1031 * fs/inode.c. 1032 */ 1033 static void 1034 mext_inode_double_lock(struct inode *inode1, struct inode *inode2) 1035 { 1036 if (inode1 == NULL || inode2 == NULL || inode1 == inode2) { 1037 if (inode1) 1038 mutex_lock(&inode1->i_mutex); 1039 else if (inode2) 1040 mutex_lock(&inode2->i_mutex); 1041 return; 1042 } 1043 1044 if (inode1->i_ino < inode2->i_ino) { 1045 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); 1046 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); 1047 } else { 1048 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT); 1049 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD); 1050 } 1051 } 1052 1053 /** 1054 * mext_inode_double_unlock - Release i_mutex on both @inode1 and @inode2 1055 * 1056 * @inode1: the inode that is released first 1057 * @inode2: the inode that is released second 1058 * 1059 * This function is moved from fs/inode.c. 
 */

static void
mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		mutex_unlock(&inode1->i_mutex);

	/* Guard against unlocking the same mutex twice when inode1 == inode2 */
	if (inode2 && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}

/**
 * ext4_move_extents - Exchange the specified range of a file
 *
 * @o_filp:		file structure of the original file
 * @d_filp:		file structure of the donor file
 * @orig_start:		start offset in block for orig
 * @donor_start:	start offset in block for donor
 * @len:		the number of blocks to be moved
 * @moved_len:		moved block length
 *
 * This function returns 0 and moved block length is set in moved_len
 * if succeed, otherwise returns error value.
 *
 * Note: ext4_move_extents() proceeds in the following order.
 * 1:ext4_move_extents() calculates the last block number of moving extent
 *   function by the start block number (orig_start) and the number of blocks
 *   to be moved (len) specified as arguments.
 *   If the {orig, donor}_start points a hole, the extent's start offset
 *   pointed by ext_cur (current extent), holecheck_path, orig_path are set
 *   after hole behind.
 * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent
 *   or the ext_cur exceeds the block_end which is last logical block number.
 * 3:To get the length of continues area, call mext_next_extent()
 *   specified with the ext_cur (initial value is holecheck_path) re-cursive,
 *   until find un-continuous extent, the start logical block number exceeds
 *   the block_end or the extent points to the last extent.
 * 4:Exchange the original inode data with donor inode data
 *   from orig_page_offset to seq_end_page.
 *   The start indexes of data are specified as arguments.
 *   That of the original inode is orig_page_offset,
 *   and the donor inode is also orig_page_offset
 *   (To easily handle blocksize != pagesize case, the offset for the
 *   donor inode is block unit).
 * 5:Update holecheck_path and orig_path to points a next proceeding extent,
 *   then returns to step 2.
 * 6:Release holecheck_path, orig_path and set the len to moved_len
 *   which shows the number of moved blocks.
 *   The moved_len is useful for the command to calculate the file offset
 *   for starting next move extent ioctl.
 * 7:Return 0 on success, or a negative error value on failure.
 */
int
ext4_move_extents(struct file *o_filp, struct file *d_filp,
		 __u64 orig_start, __u64 donor_start, __u64 len,
		 __u64 *moved_len)
{
	struct inode *orig_inode = o_filp->f_dentry->d_inode;
	struct inode *donor_inode = d_filp->f_dentry->d_inode;
	struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
	struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
	ext4_lblk_t block_start = orig_start;
	ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
	ext4_lblk_t rest_blocks;
	pgoff_t orig_page_offset = 0, seq_end_page;
	int ret, depth, last_extent = 0;
	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
	int data_offset_in_page;
	int block_len_in_page;
	int uninit;

	/* protect orig and donor against a truncate */
	mext_inode_double_lock(orig_inode, donor_inode);

	mext_double_down_read(orig_inode, donor_inode);
	/* Check the filesystem environment whether move_extent can be done */
	ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
					donor_start, &len, *moved_len);
	mext_double_up_read(orig_inode, donor_inode);
	if (ret)
		goto out2;

	/* Clamp the request so it does not run past the original file size */
	file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
	block_end = block_start + len - 1;
	if (file_end < block_end)
		len -= block_end - file_end;
	/*
	 * NOTE(review): block_end is not recomputed after len is clamped
	 * above; the main loop below still iterates up to the original
	 * block_end — confirm whether extents past file_end can be reached.
	 */

	/* get_ext_path() sets the path to NULL and ret to the error on failure */
	get_ext_path(orig_path, orig_inode, block_start, ret);
	if (orig_path == NULL)
		goto out2;

	/* Get path structure to check the hole */
	get_ext_path(holecheck_path, orig_inode, block_start, ret);
	if (holecheck_path == NULL)
		goto out;

	depth = ext_depth(orig_inode);
	ext_cur = holecheck_path[depth].p_ext;
	if (ext_cur == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Get proper extent whose ee_block is beyond block_start
	 * if block_start was within the hole.  Both paths are advanced
	 * in lock-step so they keep referring to the same extent.
	 */
	if (le32_to_cpu(ext_cur->ee_block) +
		ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
		last_extent = mext_next_extent(orig_inode,
					holecheck_path, &ext_cur);
		if (last_extent < 0) {
			ret = last_extent;
			goto out;
		}
		last_extent = mext_next_extent(orig_inode, orig_path,
							&ext_dummy);
		if (last_extent < 0) {
			ret = last_extent;
			goto out;
		}
	}
	seq_start = block_start;

	/* No blocks within the specified range. */
	if (le32_to_cpu(ext_cur->ee_block) > block_end) {
		ext4_debug("ext4 move extent: The specified range of file "
							"may be the hole\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Adjust start blocks: number of blocks of ext_cur that fall
	 * inside [block_start, block_end].
	 */
	add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
			 ext4_ext_get_actual_len(ext_cur), block_end + 1) -
		     max(le32_to_cpu(ext_cur->ee_block), block_start);

	while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
		seq_blocks += add_blocks;

		/* Adjust tail blocks so the run never passes block_end */
		if (seq_start + seq_blocks - 1 > block_end)
			seq_blocks = block_end - seq_start + 1;

		ext_prev = ext_cur;
		last_extent = mext_next_extent(orig_inode, holecheck_path,
						&ext_cur);
		if (last_extent < 0) {
			ret = last_extent;
			break;
		}
		add_blocks = ext4_ext_get_actual_len(ext_cur);

		/*
		 * Extend the length of contiguous block (seq_blocks)
		 * if extents are contiguous.
		 */
		if (ext4_can_extents_be_merged(orig_inode,
					       ext_prev, ext_cur) &&
		    block_end >= le32_to_cpu(ext_cur->ee_block) &&
		    !last_extent)
			continue;

		/* Is the original extent uninitialized (preallocated)? */
		uninit = ext4_ext_is_uninitialized(ext_prev);

		data_offset_in_page = seq_start % blocks_per_page;

		/*
		 * Calculate data blocks count that should be swapped
		 * at the first page.
		 */
		if (data_offset_in_page + seq_blocks > blocks_per_page) {
			/* Swapped blocks are across pages */
			block_len_in_page =
					blocks_per_page - data_offset_in_page;
		} else {
			/* Swapped blocks are in a page */
			block_len_in_page = seq_blocks;
		}

		orig_page_offset = seq_start >>
				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
		seq_end_page = (seq_start + seq_blocks - 1) >>
				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
		seq_start = le32_to_cpu(ext_cur->ee_block);
		rest_blocks = seq_blocks;

		/*
		 * Discard preallocations of two inodes.
		 * i_data_sem is taken per inode, in turn — never both at
		 * once, so no ordering issue arises here.
		 */
		down_write(&EXT4_I(orig_inode)->i_data_sem);
		ext4_discard_preallocations(orig_inode);
		up_write(&EXT4_I(orig_inode)->i_data_sem);

		down_write(&EXT4_I(donor_inode)->i_data_sem);
		ext4_discard_preallocations(donor_inode);
		up_write(&EXT4_I(donor_inode)->i_data_sem);

		/* Exchange data page by page over the contiguous run */
		while (orig_page_offset <= seq_end_page) {

			/* Swap original branches with new branches */
			ret = move_extent_par_page(o_filp, donor_inode,
						orig_page_offset,
						data_offset_in_page,
						block_len_in_page, uninit);
			if (ret < 0)
				goto out;
			orig_page_offset++;
			/* Count how many blocks we have exchanged */
			*moved_len += block_len_in_page;
			BUG_ON(*moved_len > len);

			/* Only the first page has a partial leading offset */
			data_offset_in_page = 0;
			rest_blocks -= block_len_in_page;
			if (rest_blocks > blocks_per_page)
				block_len_in_page = blocks_per_page;
			else
				block_len_in_page = rest_blocks;
		}

		/*
		 * Re-read both paths: the block swap invalidated the cached
		 * extent tree.  Drop the buffer references first.
		 * On get_ext_path() failure, ret holds the error and the
		 * NULL path makes us break out to the cleanup below.
		 */
		if (holecheck_path)
			ext4_ext_drop_refs(holecheck_path);
		get_ext_path(holecheck_path, orig_inode,
				      seq_start, ret);
		if (holecheck_path == NULL)
			break;
		depth = holecheck_path->p_depth;

		/* Decrease buffer counter */
		if (orig_path)
			ext4_ext_drop_refs(orig_path);
		get_ext_path(orig_path, orig_inode, seq_start, ret);
		if (orig_path == NULL)
			break;

		ext_cur = holecheck_path[depth].p_ext;
		add_blocks = ext4_ext_get_actual_len(ext_cur);
		seq_blocks = 0;

	}
out:
	if (orig_path) {
		ext4_ext_drop_refs(orig_path);
		kfree(orig_path);
	}
	if (holecheck_path) {
		ext4_ext_drop_refs(holecheck_path);
		kfree(holecheck_path);
	}
out2:
	mext_inode_double_unlock(orig_inode, donor_inode);

	if (ret)
		return ret;

	/* All of the specified blocks must be exchanged in succeed */
	BUG_ON(*moved_len != len);

	return 0;
}