/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
#include "compression.h"
#include "qgroup.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2). After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant. Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory foo was fully removed from the FS, but fsync was never
 * called on foo, only on its parent dir f1. After a crash the rm -rf must
 * be replayed. This must be able to recurse down the entire
 * directory tree. The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking. The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY	0
#define LOG_WALK_REPLAY_INODES	1
#define LOG_WALK_REPLAY_DIR_INDEX	2
#define LOG_WALK_REPLAY_ALL	3

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction. Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree. Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
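 *
 * As a rough userspace illustration (not part of this file), the workload
 * the log tree is meant to make cheap is an append-plus-fsync loop:
 *
 *	fd = open("file", O_WRONLY | O_CREAT | O_APPEND, 0644);
 *	for (;;) {
 *		write(fd, buf, sizeof(buf));
 *		fsync(fd);
 *	}
 *
 * where each fsync() can be satisfied by writing the small per-subvolume
 * log tree instead of committing the whole transaction.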
134 */ 135 136 /* 137 * start a sub transaction and setup the log tree 138 * this increments the log tree writer count to make the people 139 * syncing the tree wait for us to finish 140 */ 141 static int start_log_trans(struct btrfs_trans_handle *trans, 142 struct btrfs_root *root, 143 struct btrfs_log_ctx *ctx) 144 { 145 struct btrfs_fs_info *fs_info = root->fs_info; 146 int ret = 0; 147 148 mutex_lock(&root->log_mutex); 149 150 if (root->log_root) { 151 if (btrfs_need_log_full_commit(fs_info, trans)) { 152 ret = -EAGAIN; 153 goto out; 154 } 155 156 if (!root->log_start_pid) { 157 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); 158 root->log_start_pid = current->pid; 159 } else if (root->log_start_pid != current->pid) { 160 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); 161 } 162 } else { 163 mutex_lock(&fs_info->tree_log_mutex); 164 if (!fs_info->log_root_tree) 165 ret = btrfs_init_log_root_tree(trans, fs_info); 166 mutex_unlock(&fs_info->tree_log_mutex); 167 if (ret) 168 goto out; 169 170 ret = btrfs_add_log_tree(trans, root); 171 if (ret) 172 goto out; 173 174 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); 175 root->log_start_pid = current->pid; 176 } 177 178 atomic_inc(&root->log_batch); 179 atomic_inc(&root->log_writers); 180 if (ctx) { 181 int index = root->log_transid % 2; 182 list_add_tail(&ctx->list, &root->log_ctxs[index]); 183 ctx->log_transid = root->log_transid; 184 } 185 186 out: 187 mutex_unlock(&root->log_mutex); 188 return ret; 189 } 190 191 /* 192 * returns 0 if there was a log transaction running and we were able 193 * to join, or returns -ENOENT if there were not transactions 194 * in progress 195 */ 196 static int join_running_log_trans(struct btrfs_root *root) 197 { 198 int ret = -ENOENT; 199 200 smp_mb(); 201 if (!root->log_root) 202 return -ENOENT; 203 204 mutex_lock(&root->log_mutex); 205 if (root->log_root) { 206 ret = 0; 207 atomic_inc(&root->log_writers); 208 } 209 mutex_unlock(&root->log_mutex); 210 return ret; 211 } 212 213 /* 214 * This either makes the current running log transaction wait 215 * until you call btrfs_end_log_trans() or it makes any future 216 * log transactions wait until you call btrfs_end_log_trans() 217 */ 218 int btrfs_pin_log_trans(struct btrfs_root *root) 219 { 220 int ret = -ENOENT; 221 222 mutex_lock(&root->log_mutex); 223 atomic_inc(&root->log_writers); 224 mutex_unlock(&root->log_mutex); 225 return ret; 226 } 227 228 /* 229 * indicate we're done making changes to the log tree 230 * and wake up anyone waiting to do a sync 231 */ 232 void btrfs_end_log_trans(struct btrfs_root *root) 233 { 234 if (atomic_dec_and_test(&root->log_writers)) { 235 /* 236 * Implicit memory barrier after atomic_dec_and_test 237 */ 238 if (waitqueue_active(&root->log_writer_wait)) 239 wake_up(&root->log_writer_wait); 240 } 241 } 242 243 244 /* 245 * the walk control struct is used to pass state down the chain when 246 * processing the log tree. The stage field tells us which part 247 * of the log tree processing we are currently doing. The others 248 * are state fields used for that specific part 249 */ 250 struct walk_control { 251 /* should we free the extent on disk when done? This is used 252 * at transaction commit time while freeing a log tree 253 */ 254 int free; 255 256 /* should we write out the extent buffer? This is used 257 * while flushing the log tree to disk during a sync 258 */ 259 int write; 260 261 /* should we wait for the extent buffer io to finish? 
Also used 262 * while flushing the log tree to disk for a sync 263 */ 264 int wait; 265 266 /* pin only walk, we record which extents on disk belong to the 267 * log trees 268 */ 269 int pin; 270 271 /* what stage of the replay code we're currently in */ 272 int stage; 273 274 /* the root we are currently replaying */ 275 struct btrfs_root *replay_dest; 276 277 /* the trans handle for the current replay */ 278 struct btrfs_trans_handle *trans; 279 280 /* the function that gets used to process blocks we find in the 281 * tree. Note the extent_buffer might not be up to date when it is 282 * passed in, and it must be checked or read if you need the data 283 * inside it 284 */ 285 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, 286 struct walk_control *wc, u64 gen); 287 }; 288 289 /* 290 * process_func used to pin down extents, write them or wait on them 291 */ 292 static int process_one_buffer(struct btrfs_root *log, 293 struct extent_buffer *eb, 294 struct walk_control *wc, u64 gen) 295 { 296 struct btrfs_fs_info *fs_info = log->fs_info; 297 int ret = 0; 298 299 /* 300 * If this fs is mixed then we need to be able to process the leaves to 301 * pin down any logged extents, so we have to read the block. 302 */ 303 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 304 ret = btrfs_read_buffer(eb, gen); 305 if (ret) 306 return ret; 307 } 308 309 if (wc->pin) 310 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start, 311 eb->len); 312 313 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { 314 if (wc->pin && btrfs_header_level(eb) == 0) 315 ret = btrfs_exclude_logged_extents(fs_info, eb); 316 if (wc->write) 317 btrfs_write_tree_block(eb); 318 if (wc->wait) 319 btrfs_wait_tree_block_writeback(eb); 320 } 321 return ret; 322 } 323 324 /* 325 * Item overwrite used by replay and tree logging. eb, slot and key all refer 326 * to the src data we are copying out. 327 * 328 * root is the tree we are copying into, and path is a scratch 329 * path for use in this function (it should be released on entry and 330 * will be released on exit). 331 * 332 * If the key is already in the destination tree the existing item is 333 * overwritten. If the existing item isn't big enough, it is extended. 334 * If it is too large, it is truncated. 335 * 336 * If the key isn't in the destination yet, a new item is inserted. 
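 *
 * As an illustrative sketch (the key and sizes here are made up, not from
 * a real filesystem): when replaying a 160 byte log item with key
 * (257 INODE_ITEM 0), an existing destination item of the same size and
 * contents is left alone, a 200 byte destination item is truncated to 160
 * bytes and a 120 byte one is extended to 160 bytes before the log item's
 * bytes are copied over it.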
337 */ 338 static noinline int overwrite_item(struct btrfs_trans_handle *trans, 339 struct btrfs_root *root, 340 struct btrfs_path *path, 341 struct extent_buffer *eb, int slot, 342 struct btrfs_key *key) 343 { 344 struct btrfs_fs_info *fs_info = root->fs_info; 345 int ret; 346 u32 item_size; 347 u64 saved_i_size = 0; 348 int save_old_i_size = 0; 349 unsigned long src_ptr; 350 unsigned long dst_ptr; 351 int overwrite_root = 0; 352 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; 353 354 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 355 overwrite_root = 1; 356 357 item_size = btrfs_item_size_nr(eb, slot); 358 src_ptr = btrfs_item_ptr_offset(eb, slot); 359 360 /* look for the key in the destination tree */ 361 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 362 if (ret < 0) 363 return ret; 364 365 if (ret == 0) { 366 char *src_copy; 367 char *dst_copy; 368 u32 dst_size = btrfs_item_size_nr(path->nodes[0], 369 path->slots[0]); 370 if (dst_size != item_size) 371 goto insert; 372 373 if (item_size == 0) { 374 btrfs_release_path(path); 375 return 0; 376 } 377 dst_copy = kmalloc(item_size, GFP_NOFS); 378 src_copy = kmalloc(item_size, GFP_NOFS); 379 if (!dst_copy || !src_copy) { 380 btrfs_release_path(path); 381 kfree(dst_copy); 382 kfree(src_copy); 383 return -ENOMEM; 384 } 385 386 read_extent_buffer(eb, src_copy, src_ptr, item_size); 387 388 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 389 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, 390 item_size); 391 ret = memcmp(dst_copy, src_copy, item_size); 392 393 kfree(dst_copy); 394 kfree(src_copy); 395 /* 396 * they have the same contents, just return, this saves 397 * us from cowing blocks in the destination tree and doing 398 * extra writes that may not have been done by a previous 399 * sync 400 */ 401 if (ret == 0) { 402 btrfs_release_path(path); 403 return 0; 404 } 405 406 /* 407 * We need to load the old nbytes into the inode so when we 408 * replay the extents we've logged we get the right nbytes. 409 */ 410 if (inode_item) { 411 struct btrfs_inode_item *item; 412 u64 nbytes; 413 u32 mode; 414 415 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 416 struct btrfs_inode_item); 417 nbytes = btrfs_inode_nbytes(path->nodes[0], item); 418 item = btrfs_item_ptr(eb, slot, 419 struct btrfs_inode_item); 420 btrfs_set_inode_nbytes(eb, item, nbytes); 421 422 /* 423 * If this is a directory we need to reset the i_size to 424 * 0 so that we can set it up properly when replaying 425 * the rest of the items in this log. 426 */ 427 mode = btrfs_inode_mode(eb, item); 428 if (S_ISDIR(mode)) 429 btrfs_set_inode_size(eb, item, 0); 430 } 431 } else if (inode_item) { 432 struct btrfs_inode_item *item; 433 u32 mode; 434 435 /* 436 * New inode, set nbytes to 0 so that the nbytes comes out 437 * properly when we replay the extents. 438 */ 439 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 440 btrfs_set_inode_nbytes(eb, item, 0); 441 442 /* 443 * If this is a directory we need to reset the i_size to 0 so 444 * that we can set it up properly when replaying the rest of 445 * the items in this log. 
446 */ 447 mode = btrfs_inode_mode(eb, item); 448 if (S_ISDIR(mode)) 449 btrfs_set_inode_size(eb, item, 0); 450 } 451 insert: 452 btrfs_release_path(path); 453 /* try to insert the key into the destination tree */ 454 path->skip_release_on_error = 1; 455 ret = btrfs_insert_empty_item(trans, root, path, 456 key, item_size); 457 path->skip_release_on_error = 0; 458 459 /* make sure any existing item is the correct size */ 460 if (ret == -EEXIST || ret == -EOVERFLOW) { 461 u32 found_size; 462 found_size = btrfs_item_size_nr(path->nodes[0], 463 path->slots[0]); 464 if (found_size > item_size) 465 btrfs_truncate_item(fs_info, path, item_size, 1); 466 else if (found_size < item_size) 467 btrfs_extend_item(fs_info, path, 468 item_size - found_size); 469 } else if (ret) { 470 return ret; 471 } 472 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], 473 path->slots[0]); 474 475 /* don't overwrite an existing inode if the generation number 476 * was logged as zero. This is done when the tree logging code 477 * is just logging an inode to make sure it exists after recovery. 478 * 479 * Also, don't overwrite i_size on directories during replay. 480 * log replay inserts and removes directory items based on the 481 * state of the tree found in the subvolume, and i_size is modified 482 * as it goes 483 */ 484 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { 485 struct btrfs_inode_item *src_item; 486 struct btrfs_inode_item *dst_item; 487 488 src_item = (struct btrfs_inode_item *)src_ptr; 489 dst_item = (struct btrfs_inode_item *)dst_ptr; 490 491 if (btrfs_inode_generation(eb, src_item) == 0) { 492 struct extent_buffer *dst_eb = path->nodes[0]; 493 const u64 ino_size = btrfs_inode_size(eb, src_item); 494 495 /* 496 * For regular files an ino_size == 0 is used only when 497 * logging that an inode exists, as part of a directory 498 * fsync, and the inode wasn't fsynced before. In this 499 * case don't set the size of the inode in the fs/subvol 500 * tree, otherwise we would be throwing valid data away. 
501 */ 502 if (S_ISREG(btrfs_inode_mode(eb, src_item)) && 503 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && 504 ino_size != 0) { 505 struct btrfs_map_token token; 506 507 btrfs_init_map_token(&token); 508 btrfs_set_token_inode_size(dst_eb, dst_item, 509 ino_size, &token); 510 } 511 goto no_copy; 512 } 513 514 if (overwrite_root && 515 S_ISDIR(btrfs_inode_mode(eb, src_item)) && 516 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { 517 save_old_i_size = 1; 518 saved_i_size = btrfs_inode_size(path->nodes[0], 519 dst_item); 520 } 521 } 522 523 copy_extent_buffer(path->nodes[0], eb, dst_ptr, 524 src_ptr, item_size); 525 526 if (save_old_i_size) { 527 struct btrfs_inode_item *dst_item; 528 dst_item = (struct btrfs_inode_item *)dst_ptr; 529 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); 530 } 531 532 /* make sure the generation is filled in */ 533 if (key->type == BTRFS_INODE_ITEM_KEY) { 534 struct btrfs_inode_item *dst_item; 535 dst_item = (struct btrfs_inode_item *)dst_ptr; 536 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { 537 btrfs_set_inode_generation(path->nodes[0], dst_item, 538 trans->transid); 539 } 540 } 541 no_copy: 542 btrfs_mark_buffer_dirty(path->nodes[0]); 543 btrfs_release_path(path); 544 return 0; 545 } 546 547 /* 548 * simple helper to read an inode off the disk from a given root 549 * This can only be called for subvolume roots and not for the log 550 */ 551 static noinline struct inode *read_one_inode(struct btrfs_root *root, 552 u64 objectid) 553 { 554 struct btrfs_key key; 555 struct inode *inode; 556 557 key.objectid = objectid; 558 key.type = BTRFS_INODE_ITEM_KEY; 559 key.offset = 0; 560 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 561 if (IS_ERR(inode)) { 562 inode = NULL; 563 } else if (is_bad_inode(inode)) { 564 iput(inode); 565 inode = NULL; 566 } 567 return inode; 568 } 569 570 /* replays a single extent in 'eb' at 'slot' with 'key' into the 571 * subvolume 'root'. path is released on entry and should be released 572 * on exit. 573 * 574 * extents in the log tree have not been allocated out of the extent 575 * tree yet. So, this completes the allocation, taking a reference 576 * as required if the extent already exists or creating a new extent 577 * if it isn't in the extent allocation tree yet. 578 * 579 * The extent is inserted into the file, dropping any existing extents 580 * from the file that overlap the new one. 581 */ 582 static noinline int replay_one_extent(struct btrfs_trans_handle *trans, 583 struct btrfs_root *root, 584 struct btrfs_path *path, 585 struct extent_buffer *eb, int slot, 586 struct btrfs_key *key) 587 { 588 struct btrfs_fs_info *fs_info = root->fs_info; 589 int found_type; 590 u64 extent_end; 591 u64 start = key->offset; 592 u64 nbytes = 0; 593 struct btrfs_file_extent_item *item; 594 struct inode *inode = NULL; 595 unsigned long size; 596 int ret = 0; 597 598 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 599 found_type = btrfs_file_extent_type(eb, item); 600 601 if (found_type == BTRFS_FILE_EXTENT_REG || 602 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 603 nbytes = btrfs_file_extent_num_bytes(eb, item); 604 extent_end = start + nbytes; 605 606 /* 607 * We don't add to the inodes nbytes if we are prealloc or a 608 * hole. 
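		 * (a file extent item whose disk_bytenr is zero is how btrfs
		 * describes a hole, which is what the check below detects)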
609 */ 610 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 611 nbytes = 0; 612 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 613 size = btrfs_file_extent_inline_len(eb, slot, item); 614 nbytes = btrfs_file_extent_ram_bytes(eb, item); 615 extent_end = ALIGN(start + size, 616 fs_info->sectorsize); 617 } else { 618 ret = 0; 619 goto out; 620 } 621 622 inode = read_one_inode(root, key->objectid); 623 if (!inode) { 624 ret = -EIO; 625 goto out; 626 } 627 628 /* 629 * first check to see if we already have this extent in the 630 * file. This must be done before the btrfs_drop_extents run 631 * so we don't try to drop this extent. 632 */ 633 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), 634 start, 0); 635 636 if (ret == 0 && 637 (found_type == BTRFS_FILE_EXTENT_REG || 638 found_type == BTRFS_FILE_EXTENT_PREALLOC)) { 639 struct btrfs_file_extent_item cmp1; 640 struct btrfs_file_extent_item cmp2; 641 struct btrfs_file_extent_item *existing; 642 struct extent_buffer *leaf; 643 644 leaf = path->nodes[0]; 645 existing = btrfs_item_ptr(leaf, path->slots[0], 646 struct btrfs_file_extent_item); 647 648 read_extent_buffer(eb, &cmp1, (unsigned long)item, 649 sizeof(cmp1)); 650 read_extent_buffer(leaf, &cmp2, (unsigned long)existing, 651 sizeof(cmp2)); 652 653 /* 654 * we already have a pointer to this exact extent, 655 * we don't have to do anything 656 */ 657 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { 658 btrfs_release_path(path); 659 goto out; 660 } 661 } 662 btrfs_release_path(path); 663 664 /* drop any overlapping extents */ 665 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); 666 if (ret) 667 goto out; 668 669 if (found_type == BTRFS_FILE_EXTENT_REG || 670 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 671 u64 offset; 672 unsigned long dest_offset; 673 struct btrfs_key ins; 674 675 ret = btrfs_insert_empty_item(trans, root, path, key, 676 sizeof(*item)); 677 if (ret) 678 goto out; 679 dest_offset = btrfs_item_ptr_offset(path->nodes[0], 680 path->slots[0]); 681 copy_extent_buffer(path->nodes[0], eb, dest_offset, 682 (unsigned long)item, sizeof(*item)); 683 684 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); 685 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); 686 ins.type = BTRFS_EXTENT_ITEM_KEY; 687 offset = key->offset - btrfs_file_extent_offset(eb, item); 688 689 /* 690 * Manually record dirty extent, as here we did a shallow 691 * file extent item copy and skip normal backref update, 692 * but modifying extent tree all by ourselves. 693 * So need to manually record dirty extent for qgroup, 694 * as the owner of the file extent changed from log tree 695 * (doesn't affect qgroup) to fs/file tree(affects qgroup) 696 */ 697 ret = btrfs_qgroup_trace_extent(trans, fs_info, 698 btrfs_file_extent_disk_bytenr(eb, item), 699 btrfs_file_extent_disk_num_bytes(eb, item), 700 GFP_NOFS); 701 if (ret < 0) 702 goto out; 703 704 if (ins.objectid > 0) { 705 u64 csum_start; 706 u64 csum_end; 707 LIST_HEAD(ordered_sums); 708 /* 709 * is this extent already allocated in the extent 710 * allocation tree? 
If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, fs_info,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						fs_info,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *	extent data disk byte 12845056 nr 102400
			 *	extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *	extent data disk byte 12845056 nr 102400
			 *	extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
798 */ 799 while (!list_empty(&ordered_sums)) { 800 struct btrfs_ordered_sum *sums; 801 sums = list_entry(ordered_sums.next, 802 struct btrfs_ordered_sum, 803 list); 804 if (!ret) 805 ret = btrfs_del_csums(trans, fs_info, 806 sums->bytenr, 807 sums->len); 808 if (!ret) 809 ret = btrfs_csum_file_blocks(trans, 810 fs_info->csum_root, sums); 811 list_del(&sums->list); 812 kfree(sums); 813 } 814 if (ret) 815 goto out; 816 } else { 817 btrfs_release_path(path); 818 } 819 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 820 /* inline extents are easy, we just overwrite them */ 821 ret = overwrite_item(trans, root, path, eb, slot, key); 822 if (ret) 823 goto out; 824 } 825 826 inode_add_bytes(inode, nbytes); 827 ret = btrfs_update_inode(trans, root, inode); 828 out: 829 if (inode) 830 iput(inode); 831 return ret; 832 } 833 834 /* 835 * when cleaning up conflicts between the directory names in the 836 * subvolume, directory names in the log and directory names in the 837 * inode back references, we may have to unlink inodes from directories. 838 * 839 * This is a helper function to do the unlink of a specific directory 840 * item 841 */ 842 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, 843 struct btrfs_root *root, 844 struct btrfs_path *path, 845 struct inode *dir, 846 struct btrfs_dir_item *di) 847 { 848 struct btrfs_fs_info *fs_info = root->fs_info; 849 struct inode *inode; 850 char *name; 851 int name_len; 852 struct extent_buffer *leaf; 853 struct btrfs_key location; 854 int ret; 855 856 leaf = path->nodes[0]; 857 858 btrfs_dir_item_key_to_cpu(leaf, di, &location); 859 name_len = btrfs_dir_name_len(leaf, di); 860 name = kmalloc(name_len, GFP_NOFS); 861 if (!name) 862 return -ENOMEM; 863 864 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); 865 btrfs_release_path(path); 866 867 inode = read_one_inode(root, location.objectid); 868 if (!inode) { 869 ret = -EIO; 870 goto out; 871 } 872 873 ret = link_to_fixup_dir(trans, root, path, location.objectid); 874 if (ret) 875 goto out; 876 877 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 878 if (ret) 879 goto out; 880 else 881 ret = btrfs_run_delayed_items(trans, fs_info); 882 out: 883 kfree(name); 884 iput(inode); 885 return ret; 886 } 887 888 /* 889 * helper function to see if a given name and sequence number found 890 * in an inode back reference are already in a directory and correctly 891 * point to this inode 892 */ 893 static noinline int inode_in_dir(struct btrfs_root *root, 894 struct btrfs_path *path, 895 u64 dirid, u64 objectid, u64 index, 896 const char *name, int name_len) 897 { 898 struct btrfs_dir_item *di; 899 struct btrfs_key location; 900 int match = 0; 901 902 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, 903 index, name, name_len, 0); 904 if (di && !IS_ERR(di)) { 905 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 906 if (location.objectid != objectid) 907 goto out; 908 } else 909 goto out; 910 btrfs_release_path(path); 911 912 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); 913 if (di && !IS_ERR(di)) { 914 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 915 if (location.objectid != objectid) 916 goto out; 917 } else 918 goto out; 919 match = 1; 920 out: 921 btrfs_release_path(path); 922 return match; 923 } 924 925 /* 926 * helper function to check a log tree for a named back reference in 927 * an inode. 
This is used to decide if a back reference that is 928 * found in the subvolume conflicts with what we find in the log. 929 * 930 * inode backreferences may have multiple refs in a single item, 931 * during replay we process one reference at a time, and we don't 932 * want to delete valid links to a file from the subvolume if that 933 * link is also in the log. 934 */ 935 static noinline int backref_in_log(struct btrfs_root *log, 936 struct btrfs_key *key, 937 u64 ref_objectid, 938 const char *name, int namelen) 939 { 940 struct btrfs_path *path; 941 struct btrfs_inode_ref *ref; 942 unsigned long ptr; 943 unsigned long ptr_end; 944 unsigned long name_ptr; 945 int found_name_len; 946 int item_size; 947 int ret; 948 int match = 0; 949 950 path = btrfs_alloc_path(); 951 if (!path) 952 return -ENOMEM; 953 954 ret = btrfs_search_slot(NULL, log, key, path, 0, 0); 955 if (ret != 0) 956 goto out; 957 958 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 959 960 if (key->type == BTRFS_INODE_EXTREF_KEY) { 961 if (btrfs_find_name_in_ext_backref(path, ref_objectid, 962 name, namelen, NULL)) 963 match = 1; 964 965 goto out; 966 } 967 968 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 969 ptr_end = ptr + item_size; 970 while (ptr < ptr_end) { 971 ref = (struct btrfs_inode_ref *)ptr; 972 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); 973 if (found_name_len == namelen) { 974 name_ptr = (unsigned long)(ref + 1); 975 ret = memcmp_extent_buffer(path->nodes[0], name, 976 name_ptr, namelen); 977 if (ret == 0) { 978 match = 1; 979 goto out; 980 } 981 } 982 ptr = (unsigned long)(ref + 1) + found_name_len; 983 } 984 out: 985 btrfs_free_path(path); 986 return match; 987 } 988 989 static inline int __add_inode_ref(struct btrfs_trans_handle *trans, 990 struct btrfs_root *root, 991 struct btrfs_path *path, 992 struct btrfs_root *log_root, 993 struct inode *dir, struct inode *inode, 994 struct extent_buffer *eb, 995 u64 inode_objectid, u64 parent_objectid, 996 u64 ref_index, char *name, int namelen, 997 int *search_done) 998 { 999 struct btrfs_fs_info *fs_info = root->fs_info; 1000 int ret; 1001 char *victim_name; 1002 int victim_name_len; 1003 struct extent_buffer *leaf; 1004 struct btrfs_dir_item *di; 1005 struct btrfs_key search_key; 1006 struct btrfs_inode_extref *extref; 1007 1008 again: 1009 /* Search old style refs */ 1010 search_key.objectid = inode_objectid; 1011 search_key.type = BTRFS_INODE_REF_KEY; 1012 search_key.offset = parent_objectid; 1013 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 1014 if (ret == 0) { 1015 struct btrfs_inode_ref *victim_ref; 1016 unsigned long ptr; 1017 unsigned long ptr_end; 1018 1019 leaf = path->nodes[0]; 1020 1021 /* are we trying to overwrite a back ref for the root directory 1022 * if so, just jump out, we're done 1023 */ 1024 if (search_key.objectid == search_key.offset) 1025 return 1; 1026 1027 /* check all the names in this back reference to see 1028 * if they are in the log. 
if so, we allow them to stay 1029 * otherwise they must be unlinked as a conflict 1030 */ 1031 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1032 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); 1033 while (ptr < ptr_end) { 1034 victim_ref = (struct btrfs_inode_ref *)ptr; 1035 victim_name_len = btrfs_inode_ref_name_len(leaf, 1036 victim_ref); 1037 victim_name = kmalloc(victim_name_len, GFP_NOFS); 1038 if (!victim_name) 1039 return -ENOMEM; 1040 1041 read_extent_buffer(leaf, victim_name, 1042 (unsigned long)(victim_ref + 1), 1043 victim_name_len); 1044 1045 if (!backref_in_log(log_root, &search_key, 1046 parent_objectid, 1047 victim_name, 1048 victim_name_len)) { 1049 inc_nlink(inode); 1050 btrfs_release_path(path); 1051 1052 ret = btrfs_unlink_inode(trans, root, dir, 1053 inode, victim_name, 1054 victim_name_len); 1055 kfree(victim_name); 1056 if (ret) 1057 return ret; 1058 ret = btrfs_run_delayed_items(trans, fs_info); 1059 if (ret) 1060 return ret; 1061 *search_done = 1; 1062 goto again; 1063 } 1064 kfree(victim_name); 1065 1066 ptr = (unsigned long)(victim_ref + 1) + victim_name_len; 1067 } 1068 1069 /* 1070 * NOTE: we have searched root tree and checked the 1071 * corresponding ref, it does not need to check again. 1072 */ 1073 *search_done = 1; 1074 } 1075 btrfs_release_path(path); 1076 1077 /* Same search but for extended refs */ 1078 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, 1079 inode_objectid, parent_objectid, 0, 1080 0); 1081 if (!IS_ERR_OR_NULL(extref)) { 1082 u32 item_size; 1083 u32 cur_offset = 0; 1084 unsigned long base; 1085 struct inode *victim_parent; 1086 1087 leaf = path->nodes[0]; 1088 1089 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1090 base = btrfs_item_ptr_offset(leaf, path->slots[0]); 1091 1092 while (cur_offset < item_size) { 1093 extref = (struct btrfs_inode_extref *)(base + cur_offset); 1094 1095 victim_name_len = btrfs_inode_extref_name_len(leaf, extref); 1096 1097 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) 1098 goto next; 1099 1100 victim_name = kmalloc(victim_name_len, GFP_NOFS); 1101 if (!victim_name) 1102 return -ENOMEM; 1103 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, 1104 victim_name_len); 1105 1106 search_key.objectid = inode_objectid; 1107 search_key.type = BTRFS_INODE_EXTREF_KEY; 1108 search_key.offset = btrfs_extref_hash(parent_objectid, 1109 victim_name, 1110 victim_name_len); 1111 ret = 0; 1112 if (!backref_in_log(log_root, &search_key, 1113 parent_objectid, victim_name, 1114 victim_name_len)) { 1115 ret = -ENOENT; 1116 victim_parent = read_one_inode(root, 1117 parent_objectid); 1118 if (victim_parent) { 1119 inc_nlink(inode); 1120 btrfs_release_path(path); 1121 1122 ret = btrfs_unlink_inode(trans, root, 1123 victim_parent, 1124 inode, 1125 victim_name, 1126 victim_name_len); 1127 if (!ret) 1128 ret = btrfs_run_delayed_items( 1129 trans, 1130 fs_info); 1131 } 1132 iput(victim_parent); 1133 kfree(victim_name); 1134 if (ret) 1135 return ret; 1136 *search_done = 1; 1137 goto again; 1138 } 1139 kfree(victim_name); 1140 if (ret) 1141 return ret; 1142 next: 1143 cur_offset += victim_name_len + sizeof(*extref); 1144 } 1145 *search_done = 1; 1146 } 1147 btrfs_release_path(path); 1148 1149 /* look for a conflicting sequence number */ 1150 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), 1151 ref_index, name, namelen, 0); 1152 if (di && !IS_ERR(di)) { 1153 ret = drop_one_dir_item(trans, root, path, dir, di); 1154 if (ret) 1155 return ret; 1156 } 
1157 btrfs_release_path(path); 1158 1159 /* look for a conflicing name */ 1160 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), 1161 name, namelen, 0); 1162 if (di && !IS_ERR(di)) { 1163 ret = drop_one_dir_item(trans, root, path, dir, di); 1164 if (ret) 1165 return ret; 1166 } 1167 btrfs_release_path(path); 1168 1169 return 0; 1170 } 1171 1172 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1173 u32 *namelen, char **name, u64 *index, 1174 u64 *parent_objectid) 1175 { 1176 struct btrfs_inode_extref *extref; 1177 1178 extref = (struct btrfs_inode_extref *)ref_ptr; 1179 1180 *namelen = btrfs_inode_extref_name_len(eb, extref); 1181 *name = kmalloc(*namelen, GFP_NOFS); 1182 if (*name == NULL) 1183 return -ENOMEM; 1184 1185 read_extent_buffer(eb, *name, (unsigned long)&extref->name, 1186 *namelen); 1187 1188 *index = btrfs_inode_extref_index(eb, extref); 1189 if (parent_objectid) 1190 *parent_objectid = btrfs_inode_extref_parent(eb, extref); 1191 1192 return 0; 1193 } 1194 1195 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1196 u32 *namelen, char **name, u64 *index) 1197 { 1198 struct btrfs_inode_ref *ref; 1199 1200 ref = (struct btrfs_inode_ref *)ref_ptr; 1201 1202 *namelen = btrfs_inode_ref_name_len(eb, ref); 1203 *name = kmalloc(*namelen, GFP_NOFS); 1204 if (*name == NULL) 1205 return -ENOMEM; 1206 1207 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); 1208 1209 *index = btrfs_inode_ref_index(eb, ref); 1210 1211 return 0; 1212 } 1213 1214 /* 1215 * replay one inode back reference item found in the log tree. 1216 * eb, slot and key refer to the buffer and key found in the log tree. 1217 * root is the destination we are replaying into, and path is for temp 1218 * use by this function. (it should be released on return). 1219 */ 1220 static noinline int add_inode_ref(struct btrfs_trans_handle *trans, 1221 struct btrfs_root *root, 1222 struct btrfs_root *log, 1223 struct btrfs_path *path, 1224 struct extent_buffer *eb, int slot, 1225 struct btrfs_key *key) 1226 { 1227 struct inode *dir = NULL; 1228 struct inode *inode = NULL; 1229 unsigned long ref_ptr; 1230 unsigned long ref_end; 1231 char *name = NULL; 1232 int namelen; 1233 int ret; 1234 int search_done = 0; 1235 int log_ref_ver = 0; 1236 u64 parent_objectid; 1237 u64 inode_objectid; 1238 u64 ref_index = 0; 1239 int ref_struct_size; 1240 1241 ref_ptr = btrfs_item_ptr_offset(eb, slot); 1242 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 1243 1244 if (key->type == BTRFS_INODE_EXTREF_KEY) { 1245 struct btrfs_inode_extref *r; 1246 1247 ref_struct_size = sizeof(struct btrfs_inode_extref); 1248 log_ref_ver = 1; 1249 r = (struct btrfs_inode_extref *)ref_ptr; 1250 parent_objectid = btrfs_inode_extref_parent(eb, r); 1251 } else { 1252 ref_struct_size = sizeof(struct btrfs_inode_ref); 1253 parent_objectid = key->offset; 1254 } 1255 inode_objectid = key->objectid; 1256 1257 /* 1258 * it is possible that we didn't log all the parent directories 1259 * for a given inode. If we don't find the dir, just don't 1260 * copy the back ref in. 
The link count fixup code will take 1261 * care of the rest 1262 */ 1263 dir = read_one_inode(root, parent_objectid); 1264 if (!dir) { 1265 ret = -ENOENT; 1266 goto out; 1267 } 1268 1269 inode = read_one_inode(root, inode_objectid); 1270 if (!inode) { 1271 ret = -EIO; 1272 goto out; 1273 } 1274 1275 while (ref_ptr < ref_end) { 1276 if (log_ref_ver) { 1277 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, 1278 &ref_index, &parent_objectid); 1279 /* 1280 * parent object can change from one array 1281 * item to another. 1282 */ 1283 if (!dir) 1284 dir = read_one_inode(root, parent_objectid); 1285 if (!dir) { 1286 ret = -ENOENT; 1287 goto out; 1288 } 1289 } else { 1290 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, 1291 &ref_index); 1292 } 1293 if (ret) 1294 goto out; 1295 1296 /* if we already have a perfect match, we're done */ 1297 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), 1298 ref_index, name, namelen)) { 1299 /* 1300 * look for a conflicting back reference in the 1301 * metadata. if we find one we have to unlink that name 1302 * of the file before we add our new link. Later on, we 1303 * overwrite any existing back reference, and we don't 1304 * want to create dangling pointers in the directory. 1305 */ 1306 1307 if (!search_done) { 1308 ret = __add_inode_ref(trans, root, path, log, 1309 dir, inode, eb, 1310 inode_objectid, 1311 parent_objectid, 1312 ref_index, name, namelen, 1313 &search_done); 1314 if (ret) { 1315 if (ret == 1) 1316 ret = 0; 1317 goto out; 1318 } 1319 } 1320 1321 /* insert our name */ 1322 ret = btrfs_add_link(trans, dir, inode, name, namelen, 1323 0, ref_index); 1324 if (ret) 1325 goto out; 1326 1327 btrfs_update_inode(trans, root, inode); 1328 } 1329 1330 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; 1331 kfree(name); 1332 name = NULL; 1333 if (log_ref_ver) { 1334 iput(dir); 1335 dir = NULL; 1336 } 1337 } 1338 1339 /* finally write the back reference in the inode */ 1340 ret = overwrite_item(trans, root, path, eb, slot, key); 1341 out: 1342 btrfs_release_path(path); 1343 kfree(name); 1344 iput(dir); 1345 iput(inode); 1346 return ret; 1347 } 1348 1349 static int insert_orphan_item(struct btrfs_trans_handle *trans, 1350 struct btrfs_root *root, u64 ino) 1351 { 1352 int ret; 1353 1354 ret = btrfs_insert_orphan_item(trans, root, ino); 1355 if (ret == -EEXIST) 1356 ret = 0; 1357 1358 return ret; 1359 } 1360 1361 static int count_inode_extrefs(struct btrfs_root *root, 1362 struct inode *inode, struct btrfs_path *path) 1363 { 1364 int ret = 0; 1365 int name_len; 1366 unsigned int nlink = 0; 1367 u32 item_size; 1368 u32 cur_offset = 0; 1369 u64 inode_objectid = btrfs_ino(inode); 1370 u64 offset = 0; 1371 unsigned long ptr; 1372 struct btrfs_inode_extref *extref; 1373 struct extent_buffer *leaf; 1374 1375 while (1) { 1376 ret = btrfs_find_one_extref(root, inode_objectid, offset, path, 1377 &extref, &offset); 1378 if (ret) 1379 break; 1380 1381 leaf = path->nodes[0]; 1382 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1383 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1384 cur_offset = 0; 1385 1386 while (cur_offset < item_size) { 1387 extref = (struct btrfs_inode_extref *) (ptr + cur_offset); 1388 name_len = btrfs_inode_extref_name_len(leaf, extref); 1389 1390 nlink++; 1391 1392 cur_offset += name_len + sizeof(*extref); 1393 } 1394 1395 offset++; 1396 btrfs_release_path(path); 1397 } 1398 btrfs_release_path(path); 1399 1400 if (ret < 0 && ret != -ENOENT) 1401 return ret; 1402 return nlink; 1403 } 1404 1405 static int 
count_inode_refs(struct btrfs_root *root, 1406 struct inode *inode, struct btrfs_path *path) 1407 { 1408 int ret; 1409 struct btrfs_key key; 1410 unsigned int nlink = 0; 1411 unsigned long ptr; 1412 unsigned long ptr_end; 1413 int name_len; 1414 u64 ino = btrfs_ino(inode); 1415 1416 key.objectid = ino; 1417 key.type = BTRFS_INODE_REF_KEY; 1418 key.offset = (u64)-1; 1419 1420 while (1) { 1421 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1422 if (ret < 0) 1423 break; 1424 if (ret > 0) { 1425 if (path->slots[0] == 0) 1426 break; 1427 path->slots[0]--; 1428 } 1429 process_slot: 1430 btrfs_item_key_to_cpu(path->nodes[0], &key, 1431 path->slots[0]); 1432 if (key.objectid != ino || 1433 key.type != BTRFS_INODE_REF_KEY) 1434 break; 1435 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 1436 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], 1437 path->slots[0]); 1438 while (ptr < ptr_end) { 1439 struct btrfs_inode_ref *ref; 1440 1441 ref = (struct btrfs_inode_ref *)ptr; 1442 name_len = btrfs_inode_ref_name_len(path->nodes[0], 1443 ref); 1444 ptr = (unsigned long)(ref + 1) + name_len; 1445 nlink++; 1446 } 1447 1448 if (key.offset == 0) 1449 break; 1450 if (path->slots[0] > 0) { 1451 path->slots[0]--; 1452 goto process_slot; 1453 } 1454 key.offset--; 1455 btrfs_release_path(path); 1456 } 1457 btrfs_release_path(path); 1458 1459 return nlink; 1460 } 1461 1462 /* 1463 * There are a few corners where the link count of the file can't 1464 * be properly maintained during replay. So, instead of adding 1465 * lots of complexity to the log code, we just scan the backrefs 1466 * for any file that has been through replay. 1467 * 1468 * The scan will update the link count on the inode to reflect the 1469 * number of back refs found. If it goes down to zero, the iput 1470 * will free the inode. 
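 *
 * Roughly, the fixup below amounts to:
 *
 *	nlink = count_inode_refs() + count_inode_extrefs();
 *	if (nlink != inode->i_nlink)
 *		set_nlink(inode, nlink);
 *	if (nlink == 0)
 *		replay any pending directory deletes (for directories)
 *		and insert an orphan item;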
1471 */ 1472 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, 1473 struct btrfs_root *root, 1474 struct inode *inode) 1475 { 1476 struct btrfs_path *path; 1477 int ret; 1478 u64 nlink = 0; 1479 u64 ino = btrfs_ino(inode); 1480 1481 path = btrfs_alloc_path(); 1482 if (!path) 1483 return -ENOMEM; 1484 1485 ret = count_inode_refs(root, inode, path); 1486 if (ret < 0) 1487 goto out; 1488 1489 nlink = ret; 1490 1491 ret = count_inode_extrefs(root, inode, path); 1492 if (ret < 0) 1493 goto out; 1494 1495 nlink += ret; 1496 1497 ret = 0; 1498 1499 if (nlink != inode->i_nlink) { 1500 set_nlink(inode, nlink); 1501 btrfs_update_inode(trans, root, inode); 1502 } 1503 BTRFS_I(inode)->index_cnt = (u64)-1; 1504 1505 if (inode->i_nlink == 0) { 1506 if (S_ISDIR(inode->i_mode)) { 1507 ret = replay_dir_deletes(trans, root, NULL, path, 1508 ino, 1); 1509 if (ret) 1510 goto out; 1511 } 1512 ret = insert_orphan_item(trans, root, ino); 1513 } 1514 1515 out: 1516 btrfs_free_path(path); 1517 return ret; 1518 } 1519 1520 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, 1521 struct btrfs_root *root, 1522 struct btrfs_path *path) 1523 { 1524 int ret; 1525 struct btrfs_key key; 1526 struct inode *inode; 1527 1528 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1529 key.type = BTRFS_ORPHAN_ITEM_KEY; 1530 key.offset = (u64)-1; 1531 while (1) { 1532 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1533 if (ret < 0) 1534 break; 1535 1536 if (ret == 1) { 1537 if (path->slots[0] == 0) 1538 break; 1539 path->slots[0]--; 1540 } 1541 1542 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1543 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID || 1544 key.type != BTRFS_ORPHAN_ITEM_KEY) 1545 break; 1546 1547 ret = btrfs_del_item(trans, root, path); 1548 if (ret) 1549 goto out; 1550 1551 btrfs_release_path(path); 1552 inode = read_one_inode(root, key.offset); 1553 if (!inode) 1554 return -EIO; 1555 1556 ret = fixup_inode_link_count(trans, root, inode); 1557 iput(inode); 1558 if (ret) 1559 goto out; 1560 1561 /* 1562 * fixup on a directory may create new entries, 1563 * make sure we always look for the highset possible 1564 * offset 1565 */ 1566 key.offset = (u64)-1; 1567 } 1568 ret = 0; 1569 out: 1570 btrfs_release_path(path); 1571 return ret; 1572 } 1573 1574 1575 /* 1576 * record a given inode in the fixup dir so we can check its link 1577 * count when replay is done. The link count is incremented here 1578 * so the inode won't go away until we check it 1579 */ 1580 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, 1581 struct btrfs_root *root, 1582 struct btrfs_path *path, 1583 u64 objectid) 1584 { 1585 struct btrfs_key key; 1586 int ret = 0; 1587 struct inode *inode; 1588 1589 inode = read_one_inode(root, objectid); 1590 if (!inode) 1591 return -EIO; 1592 1593 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1594 key.type = BTRFS_ORPHAN_ITEM_KEY; 1595 key.offset = objectid; 1596 1597 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1598 1599 btrfs_release_path(path); 1600 if (ret == 0) { 1601 if (!inode->i_nlink) 1602 set_nlink(inode, 1); 1603 else 1604 inc_nlink(inode); 1605 ret = btrfs_update_inode(trans, root, inode); 1606 } else if (ret == -EEXIST) { 1607 ret = 0; 1608 } else { 1609 BUG(); /* Logic Error */ 1610 } 1611 iput(inode); 1612 1613 return ret; 1614 } 1615 1616 /* 1617 * when replaying the log for a directory, we only insert names 1618 * for inodes that actually exist. 
This means an fsync on a directory 1619 * does not implicitly fsync all the new files in it 1620 */ 1621 static noinline int insert_one_name(struct btrfs_trans_handle *trans, 1622 struct btrfs_root *root, 1623 u64 dirid, u64 index, 1624 char *name, int name_len, 1625 struct btrfs_key *location) 1626 { 1627 struct inode *inode; 1628 struct inode *dir; 1629 int ret; 1630 1631 inode = read_one_inode(root, location->objectid); 1632 if (!inode) 1633 return -ENOENT; 1634 1635 dir = read_one_inode(root, dirid); 1636 if (!dir) { 1637 iput(inode); 1638 return -EIO; 1639 } 1640 1641 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); 1642 1643 /* FIXME, put inode into FIXUP list */ 1644 1645 iput(inode); 1646 iput(dir); 1647 return ret; 1648 } 1649 1650 /* 1651 * Return true if an inode reference exists in the log for the given name, 1652 * inode and parent inode. 1653 */ 1654 static bool name_in_log_ref(struct btrfs_root *log_root, 1655 const char *name, const int name_len, 1656 const u64 dirid, const u64 ino) 1657 { 1658 struct btrfs_key search_key; 1659 1660 search_key.objectid = ino; 1661 search_key.type = BTRFS_INODE_REF_KEY; 1662 search_key.offset = dirid; 1663 if (backref_in_log(log_root, &search_key, dirid, name, name_len)) 1664 return true; 1665 1666 search_key.type = BTRFS_INODE_EXTREF_KEY; 1667 search_key.offset = btrfs_extref_hash(dirid, name, name_len); 1668 if (backref_in_log(log_root, &search_key, dirid, name, name_len)) 1669 return true; 1670 1671 return false; 1672 } 1673 1674 /* 1675 * take a single entry in a log directory item and replay it into 1676 * the subvolume. 1677 * 1678 * if a conflicting item exists in the subdirectory already, 1679 * the inode it points to is unlinked and put into the link count 1680 * fix up tree. 1681 * 1682 * If a name from the log points to a file or directory that does 1683 * not exist in the FS, it is skipped. fsyncs on directories 1684 * do not force down inodes inside that directory, just changes to the 1685 * names or unlinks in a directory. 1686 * 1687 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a 1688 * non-existing inode) and 1 if the name was replayed. 
1689 */ 1690 static noinline int replay_one_name(struct btrfs_trans_handle *trans, 1691 struct btrfs_root *root, 1692 struct btrfs_path *path, 1693 struct extent_buffer *eb, 1694 struct btrfs_dir_item *di, 1695 struct btrfs_key *key) 1696 { 1697 char *name; 1698 int name_len; 1699 struct btrfs_dir_item *dst_di; 1700 struct btrfs_key found_key; 1701 struct btrfs_key log_key; 1702 struct inode *dir; 1703 u8 log_type; 1704 int exists; 1705 int ret = 0; 1706 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY); 1707 bool name_added = false; 1708 1709 dir = read_one_inode(root, key->objectid); 1710 if (!dir) 1711 return -EIO; 1712 1713 name_len = btrfs_dir_name_len(eb, di); 1714 name = kmalloc(name_len, GFP_NOFS); 1715 if (!name) { 1716 ret = -ENOMEM; 1717 goto out; 1718 } 1719 1720 log_type = btrfs_dir_type(eb, di); 1721 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1722 name_len); 1723 1724 btrfs_dir_item_key_to_cpu(eb, di, &log_key); 1725 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); 1726 if (exists == 0) 1727 exists = 1; 1728 else 1729 exists = 0; 1730 btrfs_release_path(path); 1731 1732 if (key->type == BTRFS_DIR_ITEM_KEY) { 1733 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1734 name, name_len, 1); 1735 } else if (key->type == BTRFS_DIR_INDEX_KEY) { 1736 dst_di = btrfs_lookup_dir_index_item(trans, root, path, 1737 key->objectid, 1738 key->offset, name, 1739 name_len, 1); 1740 } else { 1741 /* Corruption */ 1742 ret = -EINVAL; 1743 goto out; 1744 } 1745 if (IS_ERR_OR_NULL(dst_di)) { 1746 /* we need a sequence number to insert, so we only 1747 * do inserts for the BTRFS_DIR_INDEX_KEY types 1748 */ 1749 if (key->type != BTRFS_DIR_INDEX_KEY) 1750 goto out; 1751 goto insert; 1752 } 1753 1754 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); 1755 /* the existing item matches the logged item */ 1756 if (found_key.objectid == log_key.objectid && 1757 found_key.type == log_key.type && 1758 found_key.offset == log_key.offset && 1759 btrfs_dir_type(path->nodes[0], dst_di) == log_type) { 1760 update_size = false; 1761 goto out; 1762 } 1763 1764 /* 1765 * don't drop the conflicting directory entry if the inode 1766 * for the new entry doesn't exist 1767 */ 1768 if (!exists) 1769 goto out; 1770 1771 ret = drop_one_dir_item(trans, root, path, dir, dst_di); 1772 if (ret) 1773 goto out; 1774 1775 if (key->type == BTRFS_DIR_INDEX_KEY) 1776 goto insert; 1777 out: 1778 btrfs_release_path(path); 1779 if (!ret && update_size) { 1780 btrfs_i_size_write(dir, dir->i_size + name_len * 2); 1781 ret = btrfs_update_inode(trans, root, dir); 1782 } 1783 kfree(name); 1784 iput(dir); 1785 if (!ret && name_added) 1786 ret = 1; 1787 return ret; 1788 1789 insert: 1790 if (name_in_log_ref(root->log_root, name, name_len, 1791 key->objectid, log_key.objectid)) { 1792 /* The dentry will be added later. */ 1793 ret = 0; 1794 update_size = false; 1795 goto out; 1796 } 1797 btrfs_release_path(path); 1798 ret = insert_one_name(trans, root, key->objectid, key->offset, 1799 name, name_len, &log_key); 1800 if (ret && ret != -ENOENT && ret != -EEXIST) 1801 goto out; 1802 if (!ret) 1803 name_added = true; 1804 update_size = false; 1805 ret = 0; 1806 goto out; 1807 } 1808 1809 /* 1810 * find all the names in a directory item and reconcile them into 1811 * the subvolume. 
Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(fs_info, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}

/*
 * directory replay has two parts. There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for. During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
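 *
 * As a made-up example: a log item with key (257 DIR_LOG_INDEX 3) whose
 * dir_log_end field is 10 says the log is authoritative for index keys
 * 3 through 10 of directory 257. Any index in that range present in the
 * subvolume directory but missing from the log was deleted before the
 * fsync and must be removed during replay.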
1903 */ 1904 static noinline int find_dir_range(struct btrfs_root *root, 1905 struct btrfs_path *path, 1906 u64 dirid, int key_type, 1907 u64 *start_ret, u64 *end_ret) 1908 { 1909 struct btrfs_key key; 1910 u64 found_end; 1911 struct btrfs_dir_log_item *item; 1912 int ret; 1913 int nritems; 1914 1915 if (*start_ret == (u64)-1) 1916 return 1; 1917 1918 key.objectid = dirid; 1919 key.type = key_type; 1920 key.offset = *start_ret; 1921 1922 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1923 if (ret < 0) 1924 goto out; 1925 if (ret > 0) { 1926 if (path->slots[0] == 0) 1927 goto out; 1928 path->slots[0]--; 1929 } 1930 if (ret != 0) 1931 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1932 1933 if (key.type != key_type || key.objectid != dirid) { 1934 ret = 1; 1935 goto next; 1936 } 1937 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 1938 struct btrfs_dir_log_item); 1939 found_end = btrfs_dir_log_end(path->nodes[0], item); 1940 1941 if (*start_ret >= key.offset && *start_ret <= found_end) { 1942 ret = 0; 1943 *start_ret = key.offset; 1944 *end_ret = found_end; 1945 goto out; 1946 } 1947 ret = 1; 1948 next: 1949 /* check the next slot in the tree to see if it is a valid item */ 1950 nritems = btrfs_header_nritems(path->nodes[0]); 1951 path->slots[0]++; 1952 if (path->slots[0] >= nritems) { 1953 ret = btrfs_next_leaf(root, path); 1954 if (ret) 1955 goto out; 1956 } 1957 1958 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1959 1960 if (key.type != key_type || key.objectid != dirid) { 1961 ret = 1; 1962 goto out; 1963 } 1964 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 1965 struct btrfs_dir_log_item); 1966 found_end = btrfs_dir_log_end(path->nodes[0], item); 1967 *start_ret = key.offset; 1968 *end_ret = found_end; 1969 ret = 0; 1970 out: 1971 btrfs_release_path(path); 1972 return ret; 1973 } 1974 1975 /* 1976 * this looks for a given directory item in the log. 
If the directory 1977 * item is not in the log, the item is removed and the inode it points 1978 * to is unlinked 1979 */ 1980 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 1981 struct btrfs_root *root, 1982 struct btrfs_root *log, 1983 struct btrfs_path *path, 1984 struct btrfs_path *log_path, 1985 struct inode *dir, 1986 struct btrfs_key *dir_key) 1987 { 1988 struct btrfs_fs_info *fs_info = root->fs_info; 1989 int ret; 1990 struct extent_buffer *eb; 1991 int slot; 1992 u32 item_size; 1993 struct btrfs_dir_item *di; 1994 struct btrfs_dir_item *log_di; 1995 int name_len; 1996 unsigned long ptr; 1997 unsigned long ptr_end; 1998 char *name; 1999 struct inode *inode; 2000 struct btrfs_key location; 2001 2002 again: 2003 eb = path->nodes[0]; 2004 slot = path->slots[0]; 2005 item_size = btrfs_item_size_nr(eb, slot); 2006 ptr = btrfs_item_ptr_offset(eb, slot); 2007 ptr_end = ptr + item_size; 2008 while (ptr < ptr_end) { 2009 di = (struct btrfs_dir_item *)ptr; 2010 if (verify_dir_item(fs_info, eb, di)) { 2011 ret = -EIO; 2012 goto out; 2013 } 2014 2015 name_len = btrfs_dir_name_len(eb, di); 2016 name = kmalloc(name_len, GFP_NOFS); 2017 if (!name) { 2018 ret = -ENOMEM; 2019 goto out; 2020 } 2021 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2022 name_len); 2023 log_di = NULL; 2024 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2025 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2026 dir_key->objectid, 2027 name, name_len, 0); 2028 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2029 log_di = btrfs_lookup_dir_index_item(trans, log, 2030 log_path, 2031 dir_key->objectid, 2032 dir_key->offset, 2033 name, name_len, 0); 2034 } 2035 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) { 2036 btrfs_dir_item_key_to_cpu(eb, di, &location); 2037 btrfs_release_path(path); 2038 btrfs_release_path(log_path); 2039 inode = read_one_inode(root, location.objectid); 2040 if (!inode) { 2041 kfree(name); 2042 return -EIO; 2043 } 2044 2045 ret = link_to_fixup_dir(trans, root, 2046 path, location.objectid); 2047 if (ret) { 2048 kfree(name); 2049 iput(inode); 2050 goto out; 2051 } 2052 2053 inc_nlink(inode); 2054 ret = btrfs_unlink_inode(trans, root, dir, inode, 2055 name, name_len); 2056 if (!ret) 2057 ret = btrfs_run_delayed_items(trans, fs_info); 2058 kfree(name); 2059 iput(inode); 2060 if (ret) 2061 goto out; 2062 2063 /* there might still be more names under this key 2064 * check and repeat if required 2065 */ 2066 ret = btrfs_search_slot(NULL, root, dir_key, path, 2067 0, 0); 2068 if (ret == 0) 2069 goto again; 2070 ret = 0; 2071 goto out; 2072 } else if (IS_ERR(log_di)) { 2073 kfree(name); 2074 return PTR_ERR(log_di); 2075 } 2076 btrfs_release_path(log_path); 2077 kfree(name); 2078 2079 ptr = (unsigned long)(di + 1); 2080 ptr += name_len; 2081 } 2082 ret = 0; 2083 out: 2084 btrfs_release_path(path); 2085 btrfs_release_path(log_path); 2086 return ret; 2087 } 2088 2089 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2090 struct btrfs_root *root, 2091 struct btrfs_root *log, 2092 struct btrfs_path *path, 2093 const u64 ino) 2094 { 2095 struct btrfs_key search_key; 2096 struct btrfs_path *log_path; 2097 int i; 2098 int nritems; 2099 int ret; 2100 2101 log_path = btrfs_alloc_path(); 2102 if (!log_path) 2103 return -ENOMEM; 2104 2105 search_key.objectid = ino; 2106 search_key.type = BTRFS_XATTR_ITEM_KEY; 2107 search_key.offset = 0; 2108 again: 2109 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2110 if (ret < 0) 2111 goto 
out; 2112 process_leaf: 2113 nritems = btrfs_header_nritems(path->nodes[0]); 2114 for (i = path->slots[0]; i < nritems; i++) { 2115 struct btrfs_key key; 2116 struct btrfs_dir_item *di; 2117 struct btrfs_dir_item *log_di; 2118 u32 total_size; 2119 u32 cur; 2120 2121 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2122 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2123 ret = 0; 2124 goto out; 2125 } 2126 2127 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2128 total_size = btrfs_item_size_nr(path->nodes[0], i); 2129 cur = 0; 2130 while (cur < total_size) { 2131 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2132 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2133 u32 this_len = sizeof(*di) + name_len + data_len; 2134 char *name; 2135 2136 name = kmalloc(name_len, GFP_NOFS); 2137 if (!name) { 2138 ret = -ENOMEM; 2139 goto out; 2140 } 2141 read_extent_buffer(path->nodes[0], name, 2142 (unsigned long)(di + 1), name_len); 2143 2144 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2145 name, name_len, 0); 2146 btrfs_release_path(log_path); 2147 if (!log_di) { 2148 /* Doesn't exist in log tree, so delete it. */ 2149 btrfs_release_path(path); 2150 di = btrfs_lookup_xattr(trans, root, path, ino, 2151 name, name_len, -1); 2152 kfree(name); 2153 if (IS_ERR(di)) { 2154 ret = PTR_ERR(di); 2155 goto out; 2156 } 2157 ASSERT(di); 2158 ret = btrfs_delete_one_dir_name(trans, root, 2159 path, di); 2160 if (ret) 2161 goto out; 2162 btrfs_release_path(path); 2163 search_key = key; 2164 goto again; 2165 } 2166 kfree(name); 2167 if (IS_ERR(log_di)) { 2168 ret = PTR_ERR(log_di); 2169 goto out; 2170 } 2171 cur += this_len; 2172 di = (struct btrfs_dir_item *)((char *)di + this_len); 2173 } 2174 } 2175 ret = btrfs_next_leaf(root, path); 2176 if (ret > 0) 2177 ret = 0; 2178 else if (ret == 0) 2179 goto process_leaf; 2180 out: 2181 btrfs_free_path(log_path); 2182 btrfs_release_path(path); 2183 return ret; 2184 } 2185 2186 2187 /* 2188 * deletion replay happens before we copy any new directory items 2189 * out of the log or out of backreferences from inodes. It 2190 * scans the log to find ranges of keys that log is authoritative for, 2191 * and then scans the directory to find items in those ranges that are 2192 * not present in the log. 2193 * 2194 * Anything we don't find in the log is unlinked and removed from the 2195 * directory. 
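 *
 * When del_all is set, range_end is forced to (u64)-1 below, so every
 * entry still present in the directory is checked (and removed if it
 * is not in the log) instead of only the ranges covered by dir range
 * items.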
2196 */ 2197 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2198 struct btrfs_root *root, 2199 struct btrfs_root *log, 2200 struct btrfs_path *path, 2201 u64 dirid, int del_all) 2202 { 2203 u64 range_start; 2204 u64 range_end; 2205 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2206 int ret = 0; 2207 struct btrfs_key dir_key; 2208 struct btrfs_key found_key; 2209 struct btrfs_path *log_path; 2210 struct inode *dir; 2211 2212 dir_key.objectid = dirid; 2213 dir_key.type = BTRFS_DIR_ITEM_KEY; 2214 log_path = btrfs_alloc_path(); 2215 if (!log_path) 2216 return -ENOMEM; 2217 2218 dir = read_one_inode(root, dirid); 2219 /* it isn't an error if the inode isn't there, that can happen 2220 * because we replay the deletes before we copy in the inode item 2221 * from the log 2222 */ 2223 if (!dir) { 2224 btrfs_free_path(log_path); 2225 return 0; 2226 } 2227 again: 2228 range_start = 0; 2229 range_end = 0; 2230 while (1) { 2231 if (del_all) 2232 range_end = (u64)-1; 2233 else { 2234 ret = find_dir_range(log, path, dirid, key_type, 2235 &range_start, &range_end); 2236 if (ret != 0) 2237 break; 2238 } 2239 2240 dir_key.offset = range_start; 2241 while (1) { 2242 int nritems; 2243 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2244 0, 0); 2245 if (ret < 0) 2246 goto out; 2247 2248 nritems = btrfs_header_nritems(path->nodes[0]); 2249 if (path->slots[0] >= nritems) { 2250 ret = btrfs_next_leaf(root, path); 2251 if (ret) 2252 break; 2253 } 2254 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2255 path->slots[0]); 2256 if (found_key.objectid != dirid || 2257 found_key.type != dir_key.type) 2258 goto next_type; 2259 2260 if (found_key.offset > range_end) 2261 break; 2262 2263 ret = check_item_in_log(trans, root, log, path, 2264 log_path, dir, 2265 &found_key); 2266 if (ret) 2267 goto out; 2268 if (found_key.offset == (u64)-1) 2269 break; 2270 dir_key.offset = found_key.offset + 1; 2271 } 2272 btrfs_release_path(path); 2273 if (range_end == (u64)-1) 2274 break; 2275 range_start = range_end + 1; 2276 } 2277 2278 next_type: 2279 ret = 0; 2280 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2281 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2282 dir_key.type = BTRFS_DIR_INDEX_KEY; 2283 btrfs_release_path(path); 2284 goto again; 2285 } 2286 out: 2287 btrfs_release_path(path); 2288 btrfs_free_path(log_path); 2289 iput(dir); 2290 return ret; 2291 } 2292 2293 /* 2294 * the process_func used to replay items from the log tree. This 2295 * gets called in two different stages. The first stage just looks 2296 * for inodes and makes sure they are all copied into the subvolume. 2297 * 2298 * The second stage copies all the other item types from the log into 2299 * the subvolume. The two stage approach is slower, but gets rid of 2300 * lots of complexity around inodes referencing other inodes that exist 2301 * only in the log (references come from either directory items or inode 2302 * back refs). 
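 *
 * (There is also a separate LOG_WALK_REPLAY_DIR_INDEX pass: dir index
 * keys are replayed during that stage, while plain dir items wait for
 * the final LOG_WALK_REPLAY_ALL pass handled at the bottom of this
 * function.)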
2303 */ 2304 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2305 struct walk_control *wc, u64 gen) 2306 { 2307 int nritems; 2308 struct btrfs_path *path; 2309 struct btrfs_root *root = wc->replay_dest; 2310 struct btrfs_key key; 2311 int level; 2312 int i; 2313 int ret; 2314 2315 ret = btrfs_read_buffer(eb, gen); 2316 if (ret) 2317 return ret; 2318 2319 level = btrfs_header_level(eb); 2320 2321 if (level != 0) 2322 return 0; 2323 2324 path = btrfs_alloc_path(); 2325 if (!path) 2326 return -ENOMEM; 2327 2328 nritems = btrfs_header_nritems(eb); 2329 for (i = 0; i < nritems; i++) { 2330 btrfs_item_key_to_cpu(eb, &key, i); 2331 2332 /* inode keys are done during the first stage */ 2333 if (key.type == BTRFS_INODE_ITEM_KEY && 2334 wc->stage == LOG_WALK_REPLAY_INODES) { 2335 struct btrfs_inode_item *inode_item; 2336 u32 mode; 2337 2338 inode_item = btrfs_item_ptr(eb, i, 2339 struct btrfs_inode_item); 2340 ret = replay_xattr_deletes(wc->trans, root, log, 2341 path, key.objectid); 2342 if (ret) 2343 break; 2344 mode = btrfs_inode_mode(eb, inode_item); 2345 if (S_ISDIR(mode)) { 2346 ret = replay_dir_deletes(wc->trans, 2347 root, log, path, key.objectid, 0); 2348 if (ret) 2349 break; 2350 } 2351 ret = overwrite_item(wc->trans, root, path, 2352 eb, i, &key); 2353 if (ret) 2354 break; 2355 2356 /* for regular files, make sure corresponding 2357 * orphan item exist. extents past the new EOF 2358 * will be truncated later by orphan cleanup. 2359 */ 2360 if (S_ISREG(mode)) { 2361 ret = insert_orphan_item(wc->trans, root, 2362 key.objectid); 2363 if (ret) 2364 break; 2365 } 2366 2367 ret = link_to_fixup_dir(wc->trans, root, 2368 path, key.objectid); 2369 if (ret) 2370 break; 2371 } 2372 2373 if (key.type == BTRFS_DIR_INDEX_KEY && 2374 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2375 ret = replay_one_dir_item(wc->trans, root, path, 2376 eb, i, &key); 2377 if (ret) 2378 break; 2379 } 2380 2381 if (wc->stage < LOG_WALK_REPLAY_ALL) 2382 continue; 2383 2384 /* these keys are simply copied */ 2385 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2386 ret = overwrite_item(wc->trans, root, path, 2387 eb, i, &key); 2388 if (ret) 2389 break; 2390 } else if (key.type == BTRFS_INODE_REF_KEY || 2391 key.type == BTRFS_INODE_EXTREF_KEY) { 2392 ret = add_inode_ref(wc->trans, root, log, path, 2393 eb, i, &key); 2394 if (ret && ret != -ENOENT) 2395 break; 2396 ret = 0; 2397 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2398 ret = replay_one_extent(wc->trans, root, path, 2399 eb, i, &key); 2400 if (ret) 2401 break; 2402 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2403 ret = replay_one_dir_item(wc->trans, root, path, 2404 eb, i, &key); 2405 if (ret) 2406 break; 2407 } 2408 } 2409 btrfs_free_path(path); 2410 return ret; 2411 } 2412 2413 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2414 struct btrfs_root *root, 2415 struct btrfs_path *path, int *level, 2416 struct walk_control *wc) 2417 { 2418 struct btrfs_fs_info *fs_info = root->fs_info; 2419 u64 root_owner; 2420 u64 bytenr; 2421 u64 ptr_gen; 2422 struct extent_buffer *next; 2423 struct extent_buffer *cur; 2424 struct extent_buffer *parent; 2425 u32 blocksize; 2426 int ret = 0; 2427 2428 WARN_ON(*level < 0); 2429 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2430 2431 while (*level > 0) { 2432 WARN_ON(*level < 0); 2433 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2434 cur = path->nodes[*level]; 2435 2436 WARN_ON(btrfs_header_level(cur) != *level); 2437 2438 if (path->slots[*level] >= 2439 btrfs_header_nritems(cur)) 2440 break; 2441 2442 
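		/*
		 * read the pointer and generation of the child block at the
		 * current slot; we either just process it (when *level == 1)
		 * or descend into it below
		 */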
bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2443 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2444 blocksize = fs_info->nodesize; 2445 2446 parent = path->nodes[*level]; 2447 root_owner = btrfs_header_owner(parent); 2448 2449 next = btrfs_find_create_tree_block(fs_info, bytenr); 2450 if (IS_ERR(next)) 2451 return PTR_ERR(next); 2452 2453 if (*level == 1) { 2454 ret = wc->process_func(root, next, wc, ptr_gen); 2455 if (ret) { 2456 free_extent_buffer(next); 2457 return ret; 2458 } 2459 2460 path->slots[*level]++; 2461 if (wc->free) { 2462 ret = btrfs_read_buffer(next, ptr_gen); 2463 if (ret) { 2464 free_extent_buffer(next); 2465 return ret; 2466 } 2467 2468 if (trans) { 2469 btrfs_tree_lock(next); 2470 btrfs_set_lock_blocking(next); 2471 clean_tree_block(trans, fs_info, next); 2472 btrfs_wait_tree_block_writeback(next); 2473 btrfs_tree_unlock(next); 2474 } 2475 2476 WARN_ON(root_owner != 2477 BTRFS_TREE_LOG_OBJECTID); 2478 ret = btrfs_free_and_pin_reserved_extent( 2479 fs_info, bytenr, 2480 blocksize); 2481 if (ret) { 2482 free_extent_buffer(next); 2483 return ret; 2484 } 2485 } 2486 free_extent_buffer(next); 2487 continue; 2488 } 2489 ret = btrfs_read_buffer(next, ptr_gen); 2490 if (ret) { 2491 free_extent_buffer(next); 2492 return ret; 2493 } 2494 2495 WARN_ON(*level <= 0); 2496 if (path->nodes[*level-1]) 2497 free_extent_buffer(path->nodes[*level-1]); 2498 path->nodes[*level-1] = next; 2499 *level = btrfs_header_level(next); 2500 path->slots[*level] = 0; 2501 cond_resched(); 2502 } 2503 WARN_ON(*level < 0); 2504 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2505 2506 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2507 2508 cond_resched(); 2509 return 0; 2510 } 2511 2512 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2513 struct btrfs_root *root, 2514 struct btrfs_path *path, int *level, 2515 struct walk_control *wc) 2516 { 2517 struct btrfs_fs_info *fs_info = root->fs_info; 2518 u64 root_owner; 2519 int i; 2520 int slot; 2521 int ret; 2522 2523 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2524 slot = path->slots[i]; 2525 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2526 path->slots[i]++; 2527 *level = i; 2528 WARN_ON(*level == 0); 2529 return 0; 2530 } else { 2531 struct extent_buffer *parent; 2532 if (path->nodes[*level] == root->node) 2533 parent = path->nodes[*level]; 2534 else 2535 parent = path->nodes[*level + 1]; 2536 2537 root_owner = btrfs_header_owner(parent); 2538 ret = wc->process_func(root, path->nodes[*level], wc, 2539 btrfs_header_generation(path->nodes[*level])); 2540 if (ret) 2541 return ret; 2542 2543 if (wc->free) { 2544 struct extent_buffer *next; 2545 2546 next = path->nodes[*level]; 2547 2548 if (trans) { 2549 btrfs_tree_lock(next); 2550 btrfs_set_lock_blocking(next); 2551 clean_tree_block(trans, fs_info, next); 2552 btrfs_wait_tree_block_writeback(next); 2553 btrfs_tree_unlock(next); 2554 } 2555 2556 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2557 ret = btrfs_free_and_pin_reserved_extent( 2558 fs_info, 2559 path->nodes[*level]->start, 2560 path->nodes[*level]->len); 2561 if (ret) 2562 return ret; 2563 } 2564 free_extent_buffer(path->nodes[*level]); 2565 path->nodes[*level] = NULL; 2566 *level = i + 1; 2567 } 2568 } 2569 return 1; 2570 } 2571 2572 /* 2573 * drop the reference count on the tree rooted at 'snap'. This traverses 2574 * the tree freeing any blocks that have a ref count of zero after being 2575 * decremented. 
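 *
 * The walk is driven by the walk_control passed in: wc->process_func is
 * run on every block of the log tree, and when wc->free is set each
 * block is also freed and pinned via btrfs_free_and_pin_reserved_extent
 * once it has been processed.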
2576 */ 2577 static int walk_log_tree(struct btrfs_trans_handle *trans, 2578 struct btrfs_root *log, struct walk_control *wc) 2579 { 2580 struct btrfs_fs_info *fs_info = log->fs_info; 2581 int ret = 0; 2582 int wret; 2583 int level; 2584 struct btrfs_path *path; 2585 int orig_level; 2586 2587 path = btrfs_alloc_path(); 2588 if (!path) 2589 return -ENOMEM; 2590 2591 level = btrfs_header_level(log->node); 2592 orig_level = level; 2593 path->nodes[level] = log->node; 2594 extent_buffer_get(log->node); 2595 path->slots[level] = 0; 2596 2597 while (1) { 2598 wret = walk_down_log_tree(trans, log, path, &level, wc); 2599 if (wret > 0) 2600 break; 2601 if (wret < 0) { 2602 ret = wret; 2603 goto out; 2604 } 2605 2606 wret = walk_up_log_tree(trans, log, path, &level, wc); 2607 if (wret > 0) 2608 break; 2609 if (wret < 0) { 2610 ret = wret; 2611 goto out; 2612 } 2613 } 2614 2615 /* was the root node processed? if not, catch it here */ 2616 if (path->nodes[orig_level]) { 2617 ret = wc->process_func(log, path->nodes[orig_level], wc, 2618 btrfs_header_generation(path->nodes[orig_level])); 2619 if (ret) 2620 goto out; 2621 if (wc->free) { 2622 struct extent_buffer *next; 2623 2624 next = path->nodes[orig_level]; 2625 2626 if (trans) { 2627 btrfs_tree_lock(next); 2628 btrfs_set_lock_blocking(next); 2629 clean_tree_block(trans, fs_info, next); 2630 btrfs_wait_tree_block_writeback(next); 2631 btrfs_tree_unlock(next); 2632 } 2633 2634 WARN_ON(log->root_key.objectid != 2635 BTRFS_TREE_LOG_OBJECTID); 2636 ret = btrfs_free_and_pin_reserved_extent(fs_info, 2637 next->start, next->len); 2638 if (ret) 2639 goto out; 2640 } 2641 } 2642 2643 out: 2644 btrfs_free_path(path); 2645 return ret; 2646 } 2647 2648 /* 2649 * helper function to update the item for a given subvolumes log root 2650 * in the tree of log roots 2651 */ 2652 static int update_log_root(struct btrfs_trans_handle *trans, 2653 struct btrfs_root *log) 2654 { 2655 struct btrfs_fs_info *fs_info = log->fs_info; 2656 int ret; 2657 2658 if (log->log_transid == 1) { 2659 /* insert root item on the first sync */ 2660 ret = btrfs_insert_root(trans, fs_info->log_root_tree, 2661 &log->root_key, &log->root_item); 2662 } else { 2663 ret = btrfs_update_root(trans, fs_info->log_root_tree, 2664 &log->root_key, &log->root_item); 2665 } 2666 return ret; 2667 } 2668 2669 static void wait_log_commit(struct btrfs_root *root, int transid) 2670 { 2671 DEFINE_WAIT(wait); 2672 int index = transid % 2; 2673 2674 /* 2675 * we only allow two pending log transactions at a time, 2676 * so we know that if ours is more than 2 older than the 2677 * current transaction, we're done 2678 */ 2679 do { 2680 prepare_to_wait(&root->log_commit_wait[index], 2681 &wait, TASK_UNINTERRUPTIBLE); 2682 mutex_unlock(&root->log_mutex); 2683 2684 if (root->log_transid_committed < transid && 2685 atomic_read(&root->log_commit[index])) 2686 schedule(); 2687 2688 finish_wait(&root->log_commit_wait[index], &wait); 2689 mutex_lock(&root->log_mutex); 2690 } while (root->log_transid_committed < transid && 2691 atomic_read(&root->log_commit[index])); 2692 } 2693 2694 static void wait_for_writer(struct btrfs_root *root) 2695 { 2696 DEFINE_WAIT(wait); 2697 2698 while (atomic_read(&root->log_writers)) { 2699 prepare_to_wait(&root->log_writer_wait, 2700 &wait, TASK_UNINTERRUPTIBLE); 2701 mutex_unlock(&root->log_mutex); 2702 if (atomic_read(&root->log_writers)) 2703 schedule(); 2704 finish_wait(&root->log_writer_wait, &wait); 2705 mutex_lock(&root->log_mutex); 2706 } 2707 } 2708 2709 static inline void 
btrfs_remove_log_ctx(struct btrfs_root *root, 2710 struct btrfs_log_ctx *ctx) 2711 { 2712 if (!ctx) 2713 return; 2714 2715 mutex_lock(&root->log_mutex); 2716 list_del_init(&ctx->list); 2717 mutex_unlock(&root->log_mutex); 2718 } 2719 2720 /* 2721 * Invoked in log mutex context, or be sure there is no other task which 2722 * can access the list. 2723 */ 2724 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, 2725 int index, int error) 2726 { 2727 struct btrfs_log_ctx *ctx; 2728 struct btrfs_log_ctx *safe; 2729 2730 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { 2731 list_del_init(&ctx->list); 2732 ctx->log_ret = error; 2733 } 2734 2735 INIT_LIST_HEAD(&root->log_ctxs[index]); 2736 } 2737 2738 /* 2739 * btrfs_sync_log does sends a given tree log down to the disk and 2740 * updates the super blocks to record it. When this call is done, 2741 * you know that any inodes previously logged are safely on disk only 2742 * if it returns 0. 2743 * 2744 * Any other return value means you need to call btrfs_commit_transaction. 2745 * Some of the edge cases for fsyncing directories that have had unlinks 2746 * or renames done in the past mean that sometimes the only safe 2747 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 2748 * that has happened. 2749 */ 2750 int btrfs_sync_log(struct btrfs_trans_handle *trans, 2751 struct btrfs_root *root, struct btrfs_log_ctx *ctx) 2752 { 2753 int index1; 2754 int index2; 2755 int mark; 2756 int ret; 2757 struct btrfs_fs_info *fs_info = root->fs_info; 2758 struct btrfs_root *log = root->log_root; 2759 struct btrfs_root *log_root_tree = fs_info->log_root_tree; 2760 int log_transid = 0; 2761 struct btrfs_log_ctx root_log_ctx; 2762 struct blk_plug plug; 2763 2764 mutex_lock(&root->log_mutex); 2765 log_transid = ctx->log_transid; 2766 if (root->log_transid_committed >= log_transid) { 2767 mutex_unlock(&root->log_mutex); 2768 return ctx->log_ret; 2769 } 2770 2771 index1 = log_transid % 2; 2772 if (atomic_read(&root->log_commit[index1])) { 2773 wait_log_commit(root, log_transid); 2774 mutex_unlock(&root->log_mutex); 2775 return ctx->log_ret; 2776 } 2777 ASSERT(log_transid == root->log_transid); 2778 atomic_set(&root->log_commit[index1], 1); 2779 2780 /* wait for previous tree log sync to complete */ 2781 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 2782 wait_log_commit(root, log_transid - 1); 2783 2784 while (1) { 2785 int batch = atomic_read(&root->log_batch); 2786 /* when we're on an ssd, just kick the log commit out */ 2787 if (!btrfs_test_opt(fs_info, SSD) && 2788 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { 2789 mutex_unlock(&root->log_mutex); 2790 schedule_timeout_uninterruptible(1); 2791 mutex_lock(&root->log_mutex); 2792 } 2793 wait_for_writer(root); 2794 if (batch == atomic_read(&root->log_batch)) 2795 break; 2796 } 2797 2798 /* bail out if we need to do a full commit */ 2799 if (btrfs_need_log_full_commit(fs_info, trans)) { 2800 ret = -EAGAIN; 2801 btrfs_free_logged_extents(log, log_transid); 2802 mutex_unlock(&root->log_mutex); 2803 goto out; 2804 } 2805 2806 if (log_transid % 2 == 0) 2807 mark = EXTENT_DIRTY; 2808 else 2809 mark = EXTENT_NEW; 2810 2811 /* we start IO on all the marked extents here, but we don't actually 2812 * wait for them until later. 
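 * The actual wait happens further down via btrfs_wait_tree_log_extents(),
 * once the log root tree has been updated as well.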
2813 */ 2814 blk_start_plug(&plug); 2815 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); 2816 if (ret) { 2817 blk_finish_plug(&plug); 2818 btrfs_abort_transaction(trans, ret); 2819 btrfs_free_logged_extents(log, log_transid); 2820 btrfs_set_log_full_commit(fs_info, trans); 2821 mutex_unlock(&root->log_mutex); 2822 goto out; 2823 } 2824 2825 btrfs_set_root_node(&log->root_item, log->node); 2826 2827 root->log_transid++; 2828 log->log_transid = root->log_transid; 2829 root->log_start_pid = 0; 2830 /* 2831 * IO has been started, blocks of the log tree have WRITTEN flag set 2832 * in their headers. new modifications of the log will be written to 2833 * new positions. so it's safe to allow log writers to go in. 2834 */ 2835 mutex_unlock(&root->log_mutex); 2836 2837 btrfs_init_log_ctx(&root_log_ctx, NULL); 2838 2839 mutex_lock(&log_root_tree->log_mutex); 2840 atomic_inc(&log_root_tree->log_batch); 2841 atomic_inc(&log_root_tree->log_writers); 2842 2843 index2 = log_root_tree->log_transid % 2; 2844 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 2845 root_log_ctx.log_transid = log_root_tree->log_transid; 2846 2847 mutex_unlock(&log_root_tree->log_mutex); 2848 2849 ret = update_log_root(trans, log); 2850 2851 mutex_lock(&log_root_tree->log_mutex); 2852 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 2853 /* 2854 * Implicit memory barrier after atomic_dec_and_test 2855 */ 2856 if (waitqueue_active(&log_root_tree->log_writer_wait)) 2857 wake_up(&log_root_tree->log_writer_wait); 2858 } 2859 2860 if (ret) { 2861 if (!list_empty(&root_log_ctx.list)) 2862 list_del_init(&root_log_ctx.list); 2863 2864 blk_finish_plug(&plug); 2865 btrfs_set_log_full_commit(fs_info, trans); 2866 2867 if (ret != -ENOSPC) { 2868 btrfs_abort_transaction(trans, ret); 2869 mutex_unlock(&log_root_tree->log_mutex); 2870 goto out; 2871 } 2872 btrfs_wait_tree_log_extents(log, mark); 2873 btrfs_free_logged_extents(log, log_transid); 2874 mutex_unlock(&log_root_tree->log_mutex); 2875 ret = -EAGAIN; 2876 goto out; 2877 } 2878 2879 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 2880 blk_finish_plug(&plug); 2881 list_del_init(&root_log_ctx.list); 2882 mutex_unlock(&log_root_tree->log_mutex); 2883 ret = root_log_ctx.log_ret; 2884 goto out; 2885 } 2886 2887 index2 = root_log_ctx.log_transid % 2; 2888 if (atomic_read(&log_root_tree->log_commit[index2])) { 2889 blk_finish_plug(&plug); 2890 ret = btrfs_wait_tree_log_extents(log, mark); 2891 btrfs_wait_logged_extents(trans, log, log_transid); 2892 wait_log_commit(log_root_tree, 2893 root_log_ctx.log_transid); 2894 mutex_unlock(&log_root_tree->log_mutex); 2895 if (!ret) 2896 ret = root_log_ctx.log_ret; 2897 goto out; 2898 } 2899 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 2900 atomic_set(&log_root_tree->log_commit[index2], 1); 2901 2902 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 2903 wait_log_commit(log_root_tree, 2904 root_log_ctx.log_transid - 1); 2905 } 2906 2907 wait_for_writer(log_root_tree); 2908 2909 /* 2910 * now that we've moved on to the tree of log tree roots, 2911 * check the full commit flag again 2912 */ 2913 if (btrfs_need_log_full_commit(fs_info, trans)) { 2914 blk_finish_plug(&plug); 2915 btrfs_wait_tree_log_extents(log, mark); 2916 btrfs_free_logged_extents(log, log_transid); 2917 mutex_unlock(&log_root_tree->log_mutex); 2918 ret = -EAGAIN; 2919 goto out_wake_log_root; 2920 } 2921 2922 ret = btrfs_write_marked_extents(fs_info, 2923 
&log_root_tree->dirty_log_pages, 2924 EXTENT_DIRTY | EXTENT_NEW); 2925 blk_finish_plug(&plug); 2926 if (ret) { 2927 btrfs_set_log_full_commit(fs_info, trans); 2928 btrfs_abort_transaction(trans, ret); 2929 btrfs_free_logged_extents(log, log_transid); 2930 mutex_unlock(&log_root_tree->log_mutex); 2931 goto out_wake_log_root; 2932 } 2933 ret = btrfs_wait_tree_log_extents(log, mark); 2934 if (!ret) 2935 ret = btrfs_wait_tree_log_extents(log_root_tree, 2936 EXTENT_NEW | EXTENT_DIRTY); 2937 if (ret) { 2938 btrfs_set_log_full_commit(fs_info, trans); 2939 btrfs_free_logged_extents(log, log_transid); 2940 mutex_unlock(&log_root_tree->log_mutex); 2941 goto out_wake_log_root; 2942 } 2943 btrfs_wait_logged_extents(trans, log, log_transid); 2944 2945 btrfs_set_super_log_root(fs_info->super_for_commit, 2946 log_root_tree->node->start); 2947 btrfs_set_super_log_root_level(fs_info->super_for_commit, 2948 btrfs_header_level(log_root_tree->node)); 2949 2950 log_root_tree->log_transid++; 2951 mutex_unlock(&log_root_tree->log_mutex); 2952 2953 /* 2954 * nobody else is going to jump in and write the the ctree 2955 * super here because the log_commit atomic below is protecting 2956 * us. We must be called with a transaction handle pinning 2957 * the running transaction open, so a full commit can't hop 2958 * in and cause problems either. 2959 */ 2960 ret = write_ctree_super(trans, fs_info, 1); 2961 if (ret) { 2962 btrfs_set_log_full_commit(fs_info, trans); 2963 btrfs_abort_transaction(trans, ret); 2964 goto out_wake_log_root; 2965 } 2966 2967 mutex_lock(&root->log_mutex); 2968 if (root->last_log_commit < log_transid) 2969 root->last_log_commit = log_transid; 2970 mutex_unlock(&root->log_mutex); 2971 2972 out_wake_log_root: 2973 mutex_lock(&log_root_tree->log_mutex); 2974 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 2975 2976 log_root_tree->log_transid_committed++; 2977 atomic_set(&log_root_tree->log_commit[index2], 0); 2978 mutex_unlock(&log_root_tree->log_mutex); 2979 2980 /* 2981 * The barrier before waitqueue_active is implied by mutex_unlock 2982 */ 2983 if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 2984 wake_up(&log_root_tree->log_commit_wait[index2]); 2985 out: 2986 mutex_lock(&root->log_mutex); 2987 btrfs_remove_all_log_ctxs(root, index1, ret); 2988 root->log_transid_committed++; 2989 atomic_set(&root->log_commit[index1], 0); 2990 mutex_unlock(&root->log_mutex); 2991 2992 /* 2993 * The barrier before waitqueue_active is implied by mutex_unlock 2994 */ 2995 if (waitqueue_active(&root->log_commit_wait[index1])) 2996 wake_up(&root->log_commit_wait[index1]); 2997 return ret; 2998 } 2999 3000 static void free_log_tree(struct btrfs_trans_handle *trans, 3001 struct btrfs_root *log) 3002 { 3003 int ret; 3004 u64 start; 3005 u64 end; 3006 struct walk_control wc = { 3007 .free = 1, 3008 .process_func = process_one_buffer 3009 }; 3010 3011 ret = walk_log_tree(trans, log, &wc); 3012 /* I don't think this can happen but just in case */ 3013 if (ret) 3014 btrfs_abort_transaction(trans, ret); 3015 3016 while (1) { 3017 ret = find_first_extent_bit(&log->dirty_log_pages, 3018 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, 3019 NULL); 3020 if (ret) 3021 break; 3022 3023 clear_extent_bits(&log->dirty_log_pages, start, end, 3024 EXTENT_DIRTY | EXTENT_NEW); 3025 } 3026 3027 /* 3028 * We may have short-circuited the log tree with the full commit logic 3029 * and left ordered extents on our list, so clear these out to keep us 3030 * from leaking inodes and memory. 
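 *
 * Both per-transid lists of logged extents are cleared here (index 0
 * and index 1, matching the two log transactions that can be pending
 * at a time), since either one may still hold entries.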
3031 */ 3032 btrfs_free_logged_extents(log, 0); 3033 btrfs_free_logged_extents(log, 1); 3034 3035 free_extent_buffer(log->node); 3036 kfree(log); 3037 } 3038 3039 /* 3040 * free all the extents used by the tree log. This should be called 3041 * at commit time of the full transaction 3042 */ 3043 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 3044 { 3045 if (root->log_root) { 3046 free_log_tree(trans, root->log_root); 3047 root->log_root = NULL; 3048 } 3049 return 0; 3050 } 3051 3052 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 3053 struct btrfs_fs_info *fs_info) 3054 { 3055 if (fs_info->log_root_tree) { 3056 free_log_tree(trans, fs_info->log_root_tree); 3057 fs_info->log_root_tree = NULL; 3058 } 3059 return 0; 3060 } 3061 3062 /* 3063 * If both a file and directory are logged, and unlinks or renames are 3064 * mixed in, we have a few interesting corners: 3065 * 3066 * create file X in dir Y 3067 * link file X to X.link in dir Y 3068 * fsync file X 3069 * unlink file X but leave X.link 3070 * fsync dir Y 3071 * 3072 * After a crash we would expect only X.link to exist. But file X 3073 * didn't get fsync'd again so the log has back refs for X and X.link. 3074 * 3075 * We solve this by removing directory entries and inode backrefs from the 3076 * log when a file that was logged in the current transaction is 3077 * unlinked. Any later fsync will include the updated log entries, and 3078 * we'll be able to reconstruct the proper directory items from backrefs. 3079 * 3080 * This optimizations allows us to avoid relogging the entire inode 3081 * or the entire directory. 3082 */ 3083 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 3084 struct btrfs_root *root, 3085 const char *name, int name_len, 3086 struct inode *dir, u64 index) 3087 { 3088 struct btrfs_root *log; 3089 struct btrfs_dir_item *di; 3090 struct btrfs_path *path; 3091 int ret; 3092 int err = 0; 3093 int bytes_del = 0; 3094 u64 dir_ino = btrfs_ino(dir); 3095 3096 if (BTRFS_I(dir)->logged_trans < trans->transid) 3097 return 0; 3098 3099 ret = join_running_log_trans(root); 3100 if (ret) 3101 return 0; 3102 3103 mutex_lock(&BTRFS_I(dir)->log_mutex); 3104 3105 log = root->log_root; 3106 path = btrfs_alloc_path(); 3107 if (!path) { 3108 err = -ENOMEM; 3109 goto out_unlock; 3110 } 3111 3112 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 3113 name, name_len, -1); 3114 if (IS_ERR(di)) { 3115 err = PTR_ERR(di); 3116 goto fail; 3117 } 3118 if (di) { 3119 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3120 bytes_del += name_len; 3121 if (ret) { 3122 err = ret; 3123 goto fail; 3124 } 3125 } 3126 btrfs_release_path(path); 3127 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 3128 index, name, name_len, -1); 3129 if (IS_ERR(di)) { 3130 err = PTR_ERR(di); 3131 goto fail; 3132 } 3133 if (di) { 3134 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3135 bytes_del += name_len; 3136 if (ret) { 3137 err = ret; 3138 goto fail; 3139 } 3140 } 3141 3142 /* update the directory size in the log to reflect the names 3143 * we have removed 3144 */ 3145 if (bytes_del) { 3146 struct btrfs_key key; 3147 3148 key.objectid = dir_ino; 3149 key.offset = 0; 3150 key.type = BTRFS_INODE_ITEM_KEY; 3151 btrfs_release_path(path); 3152 3153 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 3154 if (ret < 0) { 3155 err = ret; 3156 goto fail; 3157 } 3158 if (ret == 0) { 3159 struct btrfs_inode_item *item; 3160 u64 i_size; 3161 3162 item = btrfs_item_ptr(path->nodes[0], 
path->slots[0], 3163 struct btrfs_inode_item); 3164 i_size = btrfs_inode_size(path->nodes[0], item); 3165 if (i_size > bytes_del) 3166 i_size -= bytes_del; 3167 else 3168 i_size = 0; 3169 btrfs_set_inode_size(path->nodes[0], item, i_size); 3170 btrfs_mark_buffer_dirty(path->nodes[0]); 3171 } else 3172 ret = 0; 3173 btrfs_release_path(path); 3174 } 3175 fail: 3176 btrfs_free_path(path); 3177 out_unlock: 3178 mutex_unlock(&BTRFS_I(dir)->log_mutex); 3179 if (ret == -ENOSPC) { 3180 btrfs_set_log_full_commit(root->fs_info, trans); 3181 ret = 0; 3182 } else if (ret < 0) 3183 btrfs_abort_transaction(trans, ret); 3184 3185 btrfs_end_log_trans(root); 3186 3187 return err; 3188 } 3189 3190 /* see comments for btrfs_del_dir_entries_in_log */ 3191 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3192 struct btrfs_root *root, 3193 const char *name, int name_len, 3194 struct inode *inode, u64 dirid) 3195 { 3196 struct btrfs_fs_info *fs_info = root->fs_info; 3197 struct btrfs_root *log; 3198 u64 index; 3199 int ret; 3200 3201 if (BTRFS_I(inode)->logged_trans < trans->transid) 3202 return 0; 3203 3204 ret = join_running_log_trans(root); 3205 if (ret) 3206 return 0; 3207 log = root->log_root; 3208 mutex_lock(&BTRFS_I(inode)->log_mutex); 3209 3210 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 3211 dirid, &index); 3212 mutex_unlock(&BTRFS_I(inode)->log_mutex); 3213 if (ret == -ENOSPC) { 3214 btrfs_set_log_full_commit(fs_info, trans); 3215 ret = 0; 3216 } else if (ret < 0 && ret != -ENOENT) 3217 btrfs_abort_transaction(trans, ret); 3218 btrfs_end_log_trans(root); 3219 3220 return ret; 3221 } 3222 3223 /* 3224 * creates a range item in the log for 'dirid'. first_offset and 3225 * last_offset tell us which parts of the key space the log should 3226 * be considered authoritative for. 3227 */ 3228 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3229 struct btrfs_root *log, 3230 struct btrfs_path *path, 3231 int key_type, u64 dirid, 3232 u64 first_offset, u64 last_offset) 3233 { 3234 int ret; 3235 struct btrfs_key key; 3236 struct btrfs_dir_log_item *item; 3237 3238 key.objectid = dirid; 3239 key.offset = first_offset; 3240 if (key_type == BTRFS_DIR_ITEM_KEY) 3241 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3242 else 3243 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3244 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3245 if (ret) 3246 return ret; 3247 3248 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3249 struct btrfs_dir_log_item); 3250 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3251 btrfs_mark_buffer_dirty(path->nodes[0]); 3252 btrfs_release_path(path); 3253 return 0; 3254 } 3255 3256 /* 3257 * log all the items included in the current transaction for a given 3258 * directory. 
This also creates the range items in the log tree required 3259 * to replay anything deleted before the fsync 3260 */ 3261 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3262 struct btrfs_root *root, struct inode *inode, 3263 struct btrfs_path *path, 3264 struct btrfs_path *dst_path, int key_type, 3265 struct btrfs_log_ctx *ctx, 3266 u64 min_offset, u64 *last_offset_ret) 3267 { 3268 struct btrfs_key min_key; 3269 struct btrfs_root *log = root->log_root; 3270 struct extent_buffer *src; 3271 int err = 0; 3272 int ret; 3273 int i; 3274 int nritems; 3275 u64 first_offset = min_offset; 3276 u64 last_offset = (u64)-1; 3277 u64 ino = btrfs_ino(inode); 3278 3279 log = root->log_root; 3280 3281 min_key.objectid = ino; 3282 min_key.type = key_type; 3283 min_key.offset = min_offset; 3284 3285 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3286 3287 /* 3288 * we didn't find anything from this transaction, see if there 3289 * is anything at all 3290 */ 3291 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3292 min_key.objectid = ino; 3293 min_key.type = key_type; 3294 min_key.offset = (u64)-1; 3295 btrfs_release_path(path); 3296 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3297 if (ret < 0) { 3298 btrfs_release_path(path); 3299 return ret; 3300 } 3301 ret = btrfs_previous_item(root, path, ino, key_type); 3302 3303 /* if ret == 0 there are items for this type, 3304 * create a range to tell us the last key of this type. 3305 * otherwise, there are no items in this directory after 3306 * *min_offset, and we create a range to indicate that. 3307 */ 3308 if (ret == 0) { 3309 struct btrfs_key tmp; 3310 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3311 path->slots[0]); 3312 if (key_type == tmp.type) 3313 first_offset = max(min_offset, tmp.offset) + 1; 3314 } 3315 goto done; 3316 } 3317 3318 /* go backward to find any previous key */ 3319 ret = btrfs_previous_item(root, path, ino, key_type); 3320 if (ret == 0) { 3321 struct btrfs_key tmp; 3322 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3323 if (key_type == tmp.type) { 3324 first_offset = tmp.offset; 3325 ret = overwrite_item(trans, log, dst_path, 3326 path->nodes[0], path->slots[0], 3327 &tmp); 3328 if (ret) { 3329 err = ret; 3330 goto done; 3331 } 3332 } 3333 } 3334 btrfs_release_path(path); 3335 3336 /* find the first key from this transaction again */ 3337 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3338 if (WARN_ON(ret != 0)) 3339 goto done; 3340 3341 /* 3342 * we have a block from this transaction, log every item in it 3343 * from our directory 3344 */ 3345 while (1) { 3346 struct btrfs_key tmp; 3347 src = path->nodes[0]; 3348 nritems = btrfs_header_nritems(src); 3349 for (i = path->slots[0]; i < nritems; i++) { 3350 struct btrfs_dir_item *di; 3351 3352 btrfs_item_key_to_cpu(src, &min_key, i); 3353 3354 if (min_key.objectid != ino || min_key.type != key_type) 3355 goto done; 3356 ret = overwrite_item(trans, log, dst_path, src, i, 3357 &min_key); 3358 if (ret) { 3359 err = ret; 3360 goto done; 3361 } 3362 3363 /* 3364 * We must make sure that when we log a directory entry, 3365 * the corresponding inode, after log replay, has a 3366 * matching link count. 
For example: 3367 * 3368 * touch foo 3369 * mkdir mydir 3370 * sync 3371 * ln foo mydir/bar 3372 * xfs_io -c "fsync" mydir 3373 * <crash> 3374 * <mount fs and log replay> 3375 * 3376 * Would result in a fsync log that when replayed, our 3377 * file inode would have a link count of 1, but we get 3378 * two directory entries pointing to the same inode. 3379 * After removing one of the names, it would not be 3380 * possible to remove the other name, which resulted 3381 * always in stale file handle errors, and would not 3382 * be possible to rmdir the parent directory, since 3383 * its i_size could never decrement to the value 3384 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors. 3385 */ 3386 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3387 btrfs_dir_item_key_to_cpu(src, di, &tmp); 3388 if (ctx && 3389 (btrfs_dir_transid(src, di) == trans->transid || 3390 btrfs_dir_type(src, di) == BTRFS_FT_DIR) && 3391 tmp.type != BTRFS_ROOT_ITEM_KEY) 3392 ctx->log_new_dentries = true; 3393 } 3394 path->slots[0] = nritems; 3395 3396 /* 3397 * look ahead to the next item and see if it is also 3398 * from this directory and from this transaction 3399 */ 3400 ret = btrfs_next_leaf(root, path); 3401 if (ret == 1) { 3402 last_offset = (u64)-1; 3403 goto done; 3404 } 3405 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3406 if (tmp.objectid != ino || tmp.type != key_type) { 3407 last_offset = (u64)-1; 3408 goto done; 3409 } 3410 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3411 ret = overwrite_item(trans, log, dst_path, 3412 path->nodes[0], path->slots[0], 3413 &tmp); 3414 if (ret) 3415 err = ret; 3416 else 3417 last_offset = tmp.offset; 3418 goto done; 3419 } 3420 } 3421 done: 3422 btrfs_release_path(path); 3423 btrfs_release_path(dst_path); 3424 3425 if (err == 0) { 3426 *last_offset_ret = last_offset; 3427 /* 3428 * insert the log range keys to indicate where the log 3429 * is valid 3430 */ 3431 ret = insert_dir_log_key(trans, log, path, key_type, 3432 ino, first_offset, last_offset); 3433 if (ret) 3434 err = ret; 3435 } 3436 return err; 3437 } 3438 3439 /* 3440 * logging directories is very similar to logging inodes, We find all the items 3441 * from the current transaction and write them to the log. 3442 * 3443 * The recovery code scans the directory in the subvolume, and if it finds a 3444 * key in the range logged that is not present in the log tree, then it means 3445 * that dir entry was unlinked during the transaction. 3446 * 3447 * In order for that scan to work, we must include one key smaller than 3448 * the smallest logged by this transaction and one key larger than the largest 3449 * key logged by this transaction. 3450 */ 3451 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3452 struct btrfs_root *root, struct inode *inode, 3453 struct btrfs_path *path, 3454 struct btrfs_path *dst_path, 3455 struct btrfs_log_ctx *ctx) 3456 { 3457 u64 min_key; 3458 u64 max_key; 3459 int ret; 3460 int key_type = BTRFS_DIR_ITEM_KEY; 3461 3462 again: 3463 min_key = 0; 3464 max_key = 0; 3465 while (1) { 3466 ret = log_dir_items(trans, root, inode, path, 3467 dst_path, key_type, ctx, min_key, 3468 &max_key); 3469 if (ret) 3470 return ret; 3471 if (max_key == (u64)-1) 3472 break; 3473 min_key = max_key + 1; 3474 } 3475 3476 if (key_type == BTRFS_DIR_ITEM_KEY) { 3477 key_type = BTRFS_DIR_INDEX_KEY; 3478 goto again; 3479 } 3480 return 0; 3481 } 3482 3483 /* 3484 * a helper function to drop items from the log before we relog an 3485 * inode. 
max_key_type indicates the highest item type to remove. 3486 * This cannot be run for file data extents because it does not 3487 * free the extents they point to. 3488 */ 3489 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3490 struct btrfs_root *log, 3491 struct btrfs_path *path, 3492 u64 objectid, int max_key_type) 3493 { 3494 int ret; 3495 struct btrfs_key key; 3496 struct btrfs_key found_key; 3497 int start_slot; 3498 3499 key.objectid = objectid; 3500 key.type = max_key_type; 3501 key.offset = (u64)-1; 3502 3503 while (1) { 3504 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3505 BUG_ON(ret == 0); /* Logic error */ 3506 if (ret < 0) 3507 break; 3508 3509 if (path->slots[0] == 0) 3510 break; 3511 3512 path->slots[0]--; 3513 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3514 path->slots[0]); 3515 3516 if (found_key.objectid != objectid) 3517 break; 3518 3519 found_key.offset = 0; 3520 found_key.type = 0; 3521 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3522 &start_slot); 3523 3524 ret = btrfs_del_items(trans, log, path, start_slot, 3525 path->slots[0] - start_slot + 1); 3526 /* 3527 * If start slot isn't 0 then we don't need to re-search, we've 3528 * found the last guy with the objectid in this tree. 3529 */ 3530 if (ret || start_slot != 0) 3531 break; 3532 btrfs_release_path(path); 3533 } 3534 btrfs_release_path(path); 3535 if (ret > 0) 3536 ret = 0; 3537 return ret; 3538 } 3539 3540 static void fill_inode_item(struct btrfs_trans_handle *trans, 3541 struct extent_buffer *leaf, 3542 struct btrfs_inode_item *item, 3543 struct inode *inode, int log_inode_only, 3544 u64 logged_isize) 3545 { 3546 struct btrfs_map_token token; 3547 3548 btrfs_init_map_token(&token); 3549 3550 if (log_inode_only) { 3551 /* set the generation to zero so the recover code 3552 * can tell the difference between an logging 3553 * just to say 'this inode exists' and a logging 3554 * to say 'update this inode with these values' 3555 */ 3556 btrfs_set_token_inode_generation(leaf, item, 0, &token); 3557 btrfs_set_token_inode_size(leaf, item, logged_isize, &token); 3558 } else { 3559 btrfs_set_token_inode_generation(leaf, item, 3560 BTRFS_I(inode)->generation, 3561 &token); 3562 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3563 } 3564 3565 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3566 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3567 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3568 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3569 3570 btrfs_set_token_timespec_sec(leaf, &item->atime, 3571 inode->i_atime.tv_sec, &token); 3572 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3573 inode->i_atime.tv_nsec, &token); 3574 3575 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3576 inode->i_mtime.tv_sec, &token); 3577 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3578 inode->i_mtime.tv_nsec, &token); 3579 3580 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3581 inode->i_ctime.tv_sec, &token); 3582 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3583 inode->i_ctime.tv_nsec, &token); 3584 3585 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3586 &token); 3587 3588 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); 3589 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3590 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3591 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3592 
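	/* the block group field is always logged as 0 here; it carries no
	 * useful information for items in the log tree
	 */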
btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3593 } 3594 3595 static int log_inode_item(struct btrfs_trans_handle *trans, 3596 struct btrfs_root *log, struct btrfs_path *path, 3597 struct inode *inode) 3598 { 3599 struct btrfs_inode_item *inode_item; 3600 int ret; 3601 3602 ret = btrfs_insert_empty_item(trans, log, path, 3603 &BTRFS_I(inode)->location, 3604 sizeof(*inode_item)); 3605 if (ret && ret != -EEXIST) 3606 return ret; 3607 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3608 struct btrfs_inode_item); 3609 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0); 3610 btrfs_release_path(path); 3611 return 0; 3612 } 3613 3614 static noinline int copy_items(struct btrfs_trans_handle *trans, 3615 struct inode *inode, 3616 struct btrfs_path *dst_path, 3617 struct btrfs_path *src_path, u64 *last_extent, 3618 int start_slot, int nr, int inode_only, 3619 u64 logged_isize) 3620 { 3621 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3622 unsigned long src_offset; 3623 unsigned long dst_offset; 3624 struct btrfs_root *log = BTRFS_I(inode)->root->log_root; 3625 struct btrfs_file_extent_item *extent; 3626 struct btrfs_inode_item *inode_item; 3627 struct extent_buffer *src = src_path->nodes[0]; 3628 struct btrfs_key first_key, last_key, key; 3629 int ret; 3630 struct btrfs_key *ins_keys; 3631 u32 *ins_sizes; 3632 char *ins_data; 3633 int i; 3634 struct list_head ordered_sums; 3635 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3636 bool has_extents = false; 3637 bool need_find_last_extent = true; 3638 bool done = false; 3639 3640 INIT_LIST_HEAD(&ordered_sums); 3641 3642 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3643 nr * sizeof(u32), GFP_NOFS); 3644 if (!ins_data) 3645 return -ENOMEM; 3646 3647 first_key.objectid = (u64)-1; 3648 3649 ins_sizes = (u32 *)ins_data; 3650 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3651 3652 for (i = 0; i < nr; i++) { 3653 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3654 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3655 } 3656 ret = btrfs_insert_empty_items(trans, log, dst_path, 3657 ins_keys, ins_sizes, nr); 3658 if (ret) { 3659 kfree(ins_data); 3660 return ret; 3661 } 3662 3663 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3664 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3665 dst_path->slots[0]); 3666 3667 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3668 3669 if ((i == (nr - 1))) 3670 last_key = ins_keys[i]; 3671 3672 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3673 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3674 dst_path->slots[0], 3675 struct btrfs_inode_item); 3676 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3677 inode, inode_only == LOG_INODE_EXISTS, 3678 logged_isize); 3679 } else { 3680 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3681 src_offset, ins_sizes[i]); 3682 } 3683 3684 /* 3685 * We set need_find_last_extent here in case we know we were 3686 * processing other items and then walk into the first extent in 3687 * the inode. If we don't hit an extent then nothing changes, 3688 * we'll do the last search the next time around. 
3689 */ 3690 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 3691 has_extents = true; 3692 if (first_key.objectid == (u64)-1) 3693 first_key = ins_keys[i]; 3694 } else { 3695 need_find_last_extent = false; 3696 } 3697 3698 /* take a reference on file data extents so that truncates 3699 * or deletes of this inode don't have to relog the inode 3700 * again 3701 */ 3702 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && 3703 !skip_csum) { 3704 int found_type; 3705 extent = btrfs_item_ptr(src, start_slot + i, 3706 struct btrfs_file_extent_item); 3707 3708 if (btrfs_file_extent_generation(src, extent) < trans->transid) 3709 continue; 3710 3711 found_type = btrfs_file_extent_type(src, extent); 3712 if (found_type == BTRFS_FILE_EXTENT_REG) { 3713 u64 ds, dl, cs, cl; 3714 ds = btrfs_file_extent_disk_bytenr(src, 3715 extent); 3716 /* ds == 0 is a hole */ 3717 if (ds == 0) 3718 continue; 3719 3720 dl = btrfs_file_extent_disk_num_bytes(src, 3721 extent); 3722 cs = btrfs_file_extent_offset(src, extent); 3723 cl = btrfs_file_extent_num_bytes(src, 3724 extent); 3725 if (btrfs_file_extent_compression(src, 3726 extent)) { 3727 cs = 0; 3728 cl = dl; 3729 } 3730 3731 ret = btrfs_lookup_csums_range( 3732 fs_info->csum_root, 3733 ds + cs, ds + cs + cl - 1, 3734 &ordered_sums, 0); 3735 if (ret) { 3736 btrfs_release_path(dst_path); 3737 kfree(ins_data); 3738 return ret; 3739 } 3740 } 3741 } 3742 } 3743 3744 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 3745 btrfs_release_path(dst_path); 3746 kfree(ins_data); 3747 3748 /* 3749 * we have to do this after the loop above to avoid changing the 3750 * log tree while trying to change the log tree. 3751 */ 3752 ret = 0; 3753 while (!list_empty(&ordered_sums)) { 3754 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 3755 struct btrfs_ordered_sum, 3756 list); 3757 if (!ret) 3758 ret = btrfs_csum_file_blocks(trans, log, sums); 3759 list_del(&sums->list); 3760 kfree(sums); 3761 } 3762 3763 if (!has_extents) 3764 return ret; 3765 3766 if (need_find_last_extent && *last_extent == first_key.offset) { 3767 /* 3768 * We don't have any leafs between our current one and the one 3769 * we processed before that can have file extent items for our 3770 * inode (and have a generation number smaller than our current 3771 * transaction id). 3772 */ 3773 need_find_last_extent = false; 3774 } 3775 3776 /* 3777 * Because we use btrfs_search_forward we could skip leaves that were 3778 * not modified and then assume *last_extent is valid when it really 3779 * isn't. So back up to the previous leaf and read the end of the last 3780 * extent before we go and fill in holes. 
3781 */ 3782 if (need_find_last_extent) { 3783 u64 len; 3784 3785 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path); 3786 if (ret < 0) 3787 return ret; 3788 if (ret) 3789 goto fill_holes; 3790 if (src_path->slots[0]) 3791 src_path->slots[0]--; 3792 src = src_path->nodes[0]; 3793 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 3794 if (key.objectid != btrfs_ino(inode) || 3795 key.type != BTRFS_EXTENT_DATA_KEY) 3796 goto fill_holes; 3797 extent = btrfs_item_ptr(src, src_path->slots[0], 3798 struct btrfs_file_extent_item); 3799 if (btrfs_file_extent_type(src, extent) == 3800 BTRFS_FILE_EXTENT_INLINE) { 3801 len = btrfs_file_extent_inline_len(src, 3802 src_path->slots[0], 3803 extent); 3804 *last_extent = ALIGN(key.offset + len, 3805 fs_info->sectorsize); 3806 } else { 3807 len = btrfs_file_extent_num_bytes(src, extent); 3808 *last_extent = key.offset + len; 3809 } 3810 } 3811 fill_holes: 3812 /* So we did prev_leaf, now we need to move to the next leaf, but a few 3813 * things could have happened 3814 * 3815 * 1) A merge could have happened, so we could currently be on a leaf 3816 * that holds what we were copying in the first place. 3817 * 2) A split could have happened, and now not all of the items we want 3818 * are on the same leaf. 3819 * 3820 * So we need to adjust how we search for holes, we need to drop the 3821 * path and re-search for the first extent key we found, and then walk 3822 * forward until we hit the last one we copied. 3823 */ 3824 if (need_find_last_extent) { 3825 /* btrfs_prev_leaf could return 1 without releasing the path */ 3826 btrfs_release_path(src_path); 3827 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key, 3828 src_path, 0, 0); 3829 if (ret < 0) 3830 return ret; 3831 ASSERT(ret == 0); 3832 src = src_path->nodes[0]; 3833 i = src_path->slots[0]; 3834 } else { 3835 i = start_slot; 3836 } 3837 3838 /* 3839 * Ok so here we need to go through and fill in any holes we may have 3840 * to make sure that holes are punched for those areas in case they had 3841 * extents previously. 3842 */ 3843 while (!done) { 3844 u64 offset, len; 3845 u64 extent_end; 3846 3847 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 3848 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path); 3849 if (ret < 0) 3850 return ret; 3851 ASSERT(ret == 0); 3852 src = src_path->nodes[0]; 3853 i = 0; 3854 } 3855 3856 btrfs_item_key_to_cpu(src, &key, i); 3857 if (!btrfs_comp_cpu_keys(&key, &last_key)) 3858 done = true; 3859 if (key.objectid != btrfs_ino(inode) || 3860 key.type != BTRFS_EXTENT_DATA_KEY) { 3861 i++; 3862 continue; 3863 } 3864 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 3865 if (btrfs_file_extent_type(src, extent) == 3866 BTRFS_FILE_EXTENT_INLINE) { 3867 len = btrfs_file_extent_inline_len(src, i, extent); 3868 extent_end = ALIGN(key.offset + len, 3869 fs_info->sectorsize); 3870 } else { 3871 len = btrfs_file_extent_num_bytes(src, extent); 3872 extent_end = key.offset + len; 3873 } 3874 i++; 3875 3876 if (*last_extent == key.offset) { 3877 *last_extent = extent_end; 3878 continue; 3879 } 3880 offset = *last_extent; 3881 len = key.offset - *last_extent; 3882 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 3883 offset, 0, 0, len, 0, len, 0, 3884 0, 0); 3885 if (ret) 3886 break; 3887 *last_extent = extent_end; 3888 } 3889 /* 3890 * Need to let the callers know we dropped the path so they should 3891 * re-search. 
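 * Returning 1 instead of 0 is that signal; it is only done when we
 * actually released and re-searched the source path above.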
3892 */ 3893 if (!ret && need_find_last_extent) 3894 ret = 1; 3895 return ret; 3896 } 3897 3898 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 3899 { 3900 struct extent_map *em1, *em2; 3901 3902 em1 = list_entry(a, struct extent_map, list); 3903 em2 = list_entry(b, struct extent_map, list); 3904 3905 if (em1->start < em2->start) 3906 return -1; 3907 else if (em1->start > em2->start) 3908 return 1; 3909 return 0; 3910 } 3911 3912 static int wait_ordered_extents(struct btrfs_trans_handle *trans, 3913 struct inode *inode, 3914 struct btrfs_root *root, 3915 const struct extent_map *em, 3916 const struct list_head *logged_list, 3917 bool *ordered_io_error) 3918 { 3919 struct btrfs_fs_info *fs_info = root->fs_info; 3920 struct btrfs_ordered_extent *ordered; 3921 struct btrfs_root *log = root->log_root; 3922 u64 mod_start = em->mod_start; 3923 u64 mod_len = em->mod_len; 3924 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3925 u64 csum_offset; 3926 u64 csum_len; 3927 LIST_HEAD(ordered_sums); 3928 int ret = 0; 3929 3930 *ordered_io_error = false; 3931 3932 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 3933 em->block_start == EXTENT_MAP_HOLE) 3934 return 0; 3935 3936 /* 3937 * Wait far any ordered extent that covers our extent map. If it 3938 * finishes without an error, first check and see if our csums are on 3939 * our outstanding ordered extents. 3940 */ 3941 list_for_each_entry(ordered, logged_list, log_list) { 3942 struct btrfs_ordered_sum *sum; 3943 3944 if (!mod_len) 3945 break; 3946 3947 if (ordered->file_offset + ordered->len <= mod_start || 3948 mod_start + mod_len <= ordered->file_offset) 3949 continue; 3950 3951 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) && 3952 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) && 3953 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) { 3954 const u64 start = ordered->file_offset; 3955 const u64 end = ordered->file_offset + ordered->len - 1; 3956 3957 WARN_ON(ordered->inode != inode); 3958 filemap_fdatawrite_range(inode->i_mapping, start, end); 3959 } 3960 3961 wait_event(ordered->wait, 3962 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) || 3963 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))); 3964 3965 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) { 3966 /* 3967 * Clear the AS_EIO/AS_ENOSPC flags from the inode's 3968 * i_mapping flags, so that the next fsync won't get 3969 * an outdated io error too. 3970 */ 3971 filemap_check_errors(inode->i_mapping); 3972 *ordered_io_error = true; 3973 break; 3974 } 3975 /* 3976 * We are going to copy all the csums on this ordered extent, so 3977 * go ahead and adjust mod_start and mod_len in case this 3978 * ordered extent has already been logged. 3979 */ 3980 if (ordered->file_offset > mod_start) { 3981 if (ordered->file_offset + ordered->len >= 3982 mod_start + mod_len) 3983 mod_len = ordered->file_offset - mod_start; 3984 /* 3985 * If we have this case 3986 * 3987 * |--------- logged extent ---------| 3988 * |----- ordered extent ----| 3989 * 3990 * Just don't mess with mod_start and mod_len, we'll 3991 * just end up logging more csums than we need and it 3992 * will be ok. 
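 *
 * The else branch below handles the opposite layout, where the
 * ordered extent starts at or before mod_start: there we either
 * advance mod_start past the end of the ordered extent, or set
 * mod_len to 0 if it covers the rest of the logged range.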
3993 */ 3994 } else { 3995 if (ordered->file_offset + ordered->len < 3996 mod_start + mod_len) { 3997 mod_len = (mod_start + mod_len) - 3998 (ordered->file_offset + ordered->len); 3999 mod_start = ordered->file_offset + 4000 ordered->len; 4001 } else { 4002 mod_len = 0; 4003 } 4004 } 4005 4006 if (skip_csum) 4007 continue; 4008 4009 /* 4010 * To keep us from looping for the above case of an ordered 4011 * extent that falls inside of the logged extent. 4012 */ 4013 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, 4014 &ordered->flags)) 4015 continue; 4016 4017 list_for_each_entry(sum, &ordered->list, list) { 4018 ret = btrfs_csum_file_blocks(trans, log, sum); 4019 if (ret) 4020 break; 4021 } 4022 } 4023 4024 if (*ordered_io_error || !mod_len || ret || skip_csum) 4025 return ret; 4026 4027 if (em->compress_type) { 4028 csum_offset = 0; 4029 csum_len = max(em->block_len, em->orig_block_len); 4030 } else { 4031 csum_offset = mod_start - em->start; 4032 csum_len = mod_len; 4033 } 4034 4035 /* block start is already adjusted for the file extent offset. */ 4036 ret = btrfs_lookup_csums_range(fs_info->csum_root, 4037 em->block_start + csum_offset, 4038 em->block_start + csum_offset + 4039 csum_len - 1, &ordered_sums, 0); 4040 if (ret) 4041 return ret; 4042 4043 while (!list_empty(&ordered_sums)) { 4044 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4045 struct btrfs_ordered_sum, 4046 list); 4047 if (!ret) 4048 ret = btrfs_csum_file_blocks(trans, log, sums); 4049 list_del(&sums->list); 4050 kfree(sums); 4051 } 4052 4053 return ret; 4054 } 4055 4056 static int log_one_extent(struct btrfs_trans_handle *trans, 4057 struct inode *inode, struct btrfs_root *root, 4058 const struct extent_map *em, 4059 struct btrfs_path *path, 4060 const struct list_head *logged_list, 4061 struct btrfs_log_ctx *ctx) 4062 { 4063 struct btrfs_root *log = root->log_root; 4064 struct btrfs_file_extent_item *fi; 4065 struct extent_buffer *leaf; 4066 struct btrfs_map_token token; 4067 struct btrfs_key key; 4068 u64 extent_offset = em->start - em->orig_start; 4069 u64 block_len; 4070 int ret; 4071 int extent_inserted = 0; 4072 bool ordered_io_err = false; 4073 4074 ret = wait_ordered_extents(trans, inode, root, em, logged_list, 4075 &ordered_io_err); 4076 if (ret) 4077 return ret; 4078 4079 if (ordered_io_err) { 4080 ctx->io_err = -EIO; 4081 return 0; 4082 } 4083 4084 btrfs_init_map_token(&token); 4085 4086 ret = __btrfs_drop_extents(trans, log, inode, path, em->start, 4087 em->start + em->len, NULL, 0, 1, 4088 sizeof(*fi), &extent_inserted); 4089 if (ret) 4090 return ret; 4091 4092 if (!extent_inserted) { 4093 key.objectid = btrfs_ino(inode); 4094 key.type = BTRFS_EXTENT_DATA_KEY; 4095 key.offset = em->start; 4096 4097 ret = btrfs_insert_empty_item(trans, log, path, &key, 4098 sizeof(*fi)); 4099 if (ret) 4100 return ret; 4101 } 4102 leaf = path->nodes[0]; 4103 fi = btrfs_item_ptr(leaf, path->slots[0], 4104 struct btrfs_file_extent_item); 4105 4106 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4107 &token); 4108 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4109 btrfs_set_token_file_extent_type(leaf, fi, 4110 BTRFS_FILE_EXTENT_PREALLOC, 4111 &token); 4112 else 4113 btrfs_set_token_file_extent_type(leaf, fi, 4114 BTRFS_FILE_EXTENT_REG, 4115 &token); 4116 4117 block_len = max(em->block_len, em->orig_block_len); 4118 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4119 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4120 em->block_start, 4121 &token); 4122 
btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4123 &token); 4124 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4125 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4126 em->block_start - 4127 extent_offset, &token); 4128 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4129 &token); 4130 } else { 4131 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4132 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4133 &token); 4134 } 4135 4136 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4137 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4138 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4139 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4140 &token); 4141 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4142 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4143 btrfs_mark_buffer_dirty(leaf); 4144 4145 btrfs_release_path(path); 4146 4147 return ret; 4148 } 4149 4150 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4151 struct btrfs_root *root, 4152 struct inode *inode, 4153 struct btrfs_path *path, 4154 struct list_head *logged_list, 4155 struct btrfs_log_ctx *ctx, 4156 const u64 start, 4157 const u64 end) 4158 { 4159 struct extent_map *em, *n; 4160 struct list_head extents; 4161 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 4162 u64 test_gen; 4163 int ret = 0; 4164 int num = 0; 4165 4166 INIT_LIST_HEAD(&extents); 4167 4168 down_write(&BTRFS_I(inode)->dio_sem); 4169 write_lock(&tree->lock); 4170 test_gen = root->fs_info->last_trans_committed; 4171 4172 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4173 list_del_init(&em->list); 4174 4175 /* 4176 * Just an arbitrary number, this can be really CPU intensive 4177 * once we start getting a lot of extents, and really once we 4178 * have a bunch of extents we just want to commit since it will 4179 * be faster. 4180 */ 4181 if (++num > 32768) { 4182 list_del_init(&tree->modified_extents); 4183 ret = -EFBIG; 4184 goto process; 4185 } 4186 4187 if (em->generation <= test_gen) 4188 continue; 4189 /* Need a ref to keep it from getting evicted from cache */ 4190 atomic_inc(&em->refs); 4191 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4192 list_add_tail(&em->list, &extents); 4193 num++; 4194 } 4195 4196 list_sort(NULL, &extents, extent_cmp); 4197 btrfs_get_logged_extents(inode, logged_list, start, end); 4198 /* 4199 * Some ordered extents started by fsync might have completed 4200 * before we could collect them into the list logged_list, which 4201 * means they're gone, not in our logged_list nor in the inode's 4202 * ordered tree. We want the application/user space to know an 4203 * error happened while attempting to persist file data so that 4204 * it can take proper action. If such error happened, we leave 4205 * without writing to the log tree and the fsync must report the 4206 * file data write error and not commit the current transaction. 4207 */ 4208 ret = filemap_check_errors(inode->i_mapping); 4209 if (ret) 4210 ctx->io_err = ret; 4211 process: 4212 while (!list_empty(&extents)) { 4213 em = list_entry(extents.next, struct extent_map, list); 4214 4215 list_del_init(&em->list); 4216 4217 /* 4218 * If we had an error we just need to delete everybody from our 4219 * private list. 
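 * "Deleting" here just undoes what we did when collecting them above:
 * drop the extra reference we took (free_extent_map()) and clear
 * EXTENT_FLAG_LOGGING (clear_em_logging()), so the extent maps can be
 * freed or evicted normally again.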
4220 */ 4221 if (ret) { 4222 clear_em_logging(tree, em); 4223 free_extent_map(em); 4224 continue; 4225 } 4226 4227 write_unlock(&tree->lock); 4228 4229 ret = log_one_extent(trans, inode, root, em, path, logged_list, 4230 ctx); 4231 write_lock(&tree->lock); 4232 clear_em_logging(tree, em); 4233 free_extent_map(em); 4234 } 4235 WARN_ON(!list_empty(&extents)); 4236 write_unlock(&tree->lock); 4237 up_write(&BTRFS_I(inode)->dio_sem); 4238 4239 btrfs_release_path(path); 4240 return ret; 4241 } 4242 4243 static int logged_inode_size(struct btrfs_root *log, struct inode *inode, 4244 struct btrfs_path *path, u64 *size_ret) 4245 { 4246 struct btrfs_key key; 4247 int ret; 4248 4249 key.objectid = btrfs_ino(inode); 4250 key.type = BTRFS_INODE_ITEM_KEY; 4251 key.offset = 0; 4252 4253 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); 4254 if (ret < 0) { 4255 return ret; 4256 } else if (ret > 0) { 4257 *size_ret = 0; 4258 } else { 4259 struct btrfs_inode_item *item; 4260 4261 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4262 struct btrfs_inode_item); 4263 *size_ret = btrfs_inode_size(path->nodes[0], item); 4264 } 4265 4266 btrfs_release_path(path); 4267 return 0; 4268 } 4269 4270 /* 4271 * At the moment we always log all xattrs. This is to figure out at log replay 4272 * time which xattrs must have their deletion replayed. If a xattr is missing 4273 * in the log tree and exists in the fs/subvol tree, we delete it. This is 4274 * because if a xattr is deleted, the inode is fsynced and a power failure 4275 * happens, causing the log to be replayed the next time the fs is mounted, 4276 * we want the xattr to not exist anymore (same behaviour as other filesystems 4277 * with a journal, ext3/4, xfs, f2fs, etc). 4278 */ 4279 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 4280 struct btrfs_root *root, 4281 struct inode *inode, 4282 struct btrfs_path *path, 4283 struct btrfs_path *dst_path) 4284 { 4285 int ret; 4286 struct btrfs_key key; 4287 const u64 ino = btrfs_ino(inode); 4288 int ins_nr = 0; 4289 int start_slot = 0; 4290 4291 key.objectid = ino; 4292 key.type = BTRFS_XATTR_ITEM_KEY; 4293 key.offset = 0; 4294 4295 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4296 if (ret < 0) 4297 return ret; 4298 4299 while (true) { 4300 int slot = path->slots[0]; 4301 struct extent_buffer *leaf = path->nodes[0]; 4302 int nritems = btrfs_header_nritems(leaf); 4303 4304 if (slot >= nritems) { 4305 if (ins_nr > 0) { 4306 u64 last_extent = 0; 4307 4308 ret = copy_items(trans, inode, dst_path, path, 4309 &last_extent, start_slot, 4310 ins_nr, 1, 0); 4311 /* can't be 1, extent items aren't processed */ 4312 ASSERT(ret <= 0); 4313 if (ret < 0) 4314 return ret; 4315 ins_nr = 0; 4316 } 4317 ret = btrfs_next_leaf(root, path); 4318 if (ret < 0) 4319 return ret; 4320 else if (ret > 0) 4321 break; 4322 continue; 4323 } 4324 4325 btrfs_item_key_to_cpu(leaf, &key, slot); 4326 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 4327 break; 4328 4329 if (ins_nr == 0) 4330 start_slot = slot; 4331 ins_nr++; 4332 path->slots[0]++; 4333 cond_resched(); 4334 } 4335 if (ins_nr > 0) { 4336 u64 last_extent = 0; 4337 4338 ret = copy_items(trans, inode, dst_path, path, 4339 &last_extent, start_slot, 4340 ins_nr, 1, 0); 4341 /* can't be 1, extent items aren't processed */ 4342 ASSERT(ret <= 0); 4343 if (ret < 0) 4344 return ret; 4345 } 4346 4347 return 0; 4348 } 4349 4350 /* 4351 * If the no holes feature is enabled we need to make sure any hole between the 4352 * last extent and the i_size of our 
inode is explicitly marked in the log. This 4353 * is to make sure that doing something like: 4354 * 4355 * 1) create file with 128Kb of data 4356 * 2) truncate file to 64Kb 4357 * 3) truncate file to 256Kb 4358 * 4) fsync file 4359 * 5) <crash/power failure> 4360 * 6) mount fs and trigger log replay 4361 * 4362 * Will give us a file with a size of 256Kb, the first 64Kb of data match what 4363 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the 4364 * file correspond to a hole. The presence of explicit holes in a log tree is 4365 * what guarantees that log replay will remove/adjust file extent items in the 4366 * fs/subvol tree. 4367 * 4368 * Here we do not need to care about holes between extents, that is already done 4369 * by copy_items(). We also only need to do this in the full sync path, where we 4370 * lookup for extents from the fs/subvol tree only. In the fast path case, we 4371 * lookup the list of modified extent maps and if any represents a hole, we 4372 * insert a corresponding extent representing a hole in the log tree. 4373 */ 4374 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, 4375 struct btrfs_root *root, 4376 struct inode *inode, 4377 struct btrfs_path *path) 4378 { 4379 struct btrfs_fs_info *fs_info = root->fs_info; 4380 int ret; 4381 struct btrfs_key key; 4382 u64 hole_start; 4383 u64 hole_size; 4384 struct extent_buffer *leaf; 4385 struct btrfs_root *log = root->log_root; 4386 const u64 ino = btrfs_ino(inode); 4387 const u64 i_size = i_size_read(inode); 4388 4389 if (!btrfs_fs_incompat(fs_info, NO_HOLES)) 4390 return 0; 4391 4392 key.objectid = ino; 4393 key.type = BTRFS_EXTENT_DATA_KEY; 4394 key.offset = (u64)-1; 4395 4396 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4397 ASSERT(ret != 0); 4398 if (ret < 0) 4399 return ret; 4400 4401 ASSERT(path->slots[0] > 0); 4402 path->slots[0]--; 4403 leaf = path->nodes[0]; 4404 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4405 4406 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { 4407 /* inode does not have any extents */ 4408 hole_start = 0; 4409 hole_size = i_size; 4410 } else { 4411 struct btrfs_file_extent_item *extent; 4412 u64 len; 4413 4414 /* 4415 * If there's an extent beyond i_size, an explicit hole was 4416 * already inserted by copy_items(). 4417 */ 4418 if (key.offset >= i_size) 4419 return 0; 4420 4421 extent = btrfs_item_ptr(leaf, path->slots[0], 4422 struct btrfs_file_extent_item); 4423 4424 if (btrfs_file_extent_type(leaf, extent) == 4425 BTRFS_FILE_EXTENT_INLINE) { 4426 len = btrfs_file_extent_inline_len(leaf, 4427 path->slots[0], 4428 extent); 4429 ASSERT(len == i_size); 4430 return 0; 4431 } 4432 4433 len = btrfs_file_extent_num_bytes(leaf, extent); 4434 /* Last extent goes beyond i_size, no need to log a hole. */ 4435 if (key.offset + len > i_size) 4436 return 0; 4437 hole_start = key.offset + len; 4438 hole_size = i_size - hole_start; 4439 } 4440 btrfs_release_path(path); 4441 4442 /* Last extent ends at i_size. */ 4443 if (hole_size == 0) 4444 return 0; 4445 4446 hole_size = ALIGN(hole_size, fs_info->sectorsize); 4447 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, 4448 hole_size, 0, hole_size, 0, 0, 0); 4449 return ret; 4450 } 4451 4452 /* 4453 * When we are logging a new inode X, check if it doesn't have a reference that 4454 * matches the reference from some other inode Y created in a past transaction 4455 * and that was renamed in the current transaction. 
If we don't do this, then at 4456 * log replay time we can lose inode Y (and all its files if it's a directory): 4457 * 4458 * mkdir /mnt/x 4459 * echo "hello world" > /mnt/x/foobar 4460 * sync 4461 * mv /mnt/x /mnt/y 4462 * mkdir /mnt/x # or touch /mnt/x 4463 * xfs_io -c fsync /mnt/x 4464 * <power fail> 4465 * mount fs, trigger log replay 4466 * 4467 * After the log replay procedure, we would lose the first directory and all its 4468 * files (file foobar). 4469 * For the case where inode Y is not a directory we simply end up losing it: 4470 * 4471 * echo "123" > /mnt/foo 4472 * sync 4473 * mv /mnt/foo /mnt/bar 4474 * echo "abc" > /mnt/foo 4475 * xfs_io -c fsync /mnt/foo 4476 * <power fail> 4477 * 4478 * We also need this for cases where a snapshot entry is replaced by some other 4479 * entry (file or directory) otherwise we end up with an unreplayable log due to 4480 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 4481 * if it were a regular entry: 4482 * 4483 * mkdir /mnt/x 4484 * btrfs subvolume snapshot /mnt /mnt/x/snap 4485 * btrfs subvolume delete /mnt/x/snap 4486 * rmdir /mnt/x 4487 * mkdir /mnt/x 4488 * fsync /mnt/x or fsync some new file inside it 4489 * <power fail> 4490 * 4491 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 4492 * the same transaction. 4493 */ 4494 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4495 const int slot, 4496 const struct btrfs_key *key, 4497 struct inode *inode, 4498 u64 *other_ino) 4499 { 4500 int ret; 4501 struct btrfs_path *search_path; 4502 char *name = NULL; 4503 u32 name_len = 0; 4504 u32 item_size = btrfs_item_size_nr(eb, slot); 4505 u32 cur_offset = 0; 4506 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4507 4508 search_path = btrfs_alloc_path(); 4509 if (!search_path) 4510 return -ENOMEM; 4511 search_path->search_commit_root = 1; 4512 search_path->skip_locking = 1; 4513 4514 while (cur_offset < item_size) { 4515 u64 parent; 4516 u32 this_name_len; 4517 u32 this_len; 4518 unsigned long name_ptr; 4519 struct btrfs_dir_item *di; 4520 4521 if (key->type == BTRFS_INODE_REF_KEY) { 4522 struct btrfs_inode_ref *iref; 4523 4524 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4525 parent = key->offset; 4526 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4527 name_ptr = (unsigned long)(iref + 1); 4528 this_len = sizeof(*iref) + this_name_len; 4529 } else { 4530 struct btrfs_inode_extref *extref; 4531 4532 extref = (struct btrfs_inode_extref *)(ptr + 4533 cur_offset); 4534 parent = btrfs_inode_extref_parent(eb, extref); 4535 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4536 name_ptr = (unsigned long)&extref->name; 4537 this_len = sizeof(*extref) + this_name_len; 4538 } 4539 4540 if (this_name_len > name_len) { 4541 char *new_name; 4542 4543 new_name = krealloc(name, this_name_len, GFP_NOFS); 4544 if (!new_name) { 4545 ret = -ENOMEM; 4546 goto out; 4547 } 4548 name_len = this_name_len; 4549 name = new_name; 4550 } 4551 4552 read_extent_buffer(eb, name, name_ptr, this_name_len); 4553 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root, 4554 search_path, parent, 4555 name, this_name_len, 0); 4556 if (di && !IS_ERR(di)) { 4557 struct btrfs_key di_key; 4558 4559 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4560 di, &di_key); 4561 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4562 ret = 1; 4563 *other_ino = di_key.objectid; 4564 } else { 4565 ret = -EAGAIN; 4566 } 4567 goto out; 4568 } else if (IS_ERR(di)) { 4569 ret = PTR_ERR(di); 4570 goto out; 
4571 } 4572 btrfs_release_path(search_path); 4573 4574 cur_offset += this_len; 4575 } 4576 ret = 0; 4577 out: 4578 btrfs_free_path(search_path); 4579 kfree(name); 4580 return ret; 4581 } 4582 4583 /* log a single inode in the tree log. 4584 * At least one parent directory for this inode must exist in the tree 4585 * or be logged already. 4586 * 4587 * Any items from this inode changed by the current transaction are copied 4588 * to the log tree. An extra reference is taken on any extents in this 4589 * file, allowing us to avoid a whole pile of corner cases around logging 4590 * blocks that have been removed from the tree. 4591 * 4592 * See LOG_INODE_ALL and related defines for a description of what inode_only 4593 * does. 4594 * 4595 * This handles both files and directories. 4596 */ 4597 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 4598 struct btrfs_root *root, struct inode *inode, 4599 int inode_only, 4600 const loff_t start, 4601 const loff_t end, 4602 struct btrfs_log_ctx *ctx) 4603 { 4604 struct btrfs_fs_info *fs_info = root->fs_info; 4605 struct btrfs_path *path; 4606 struct btrfs_path *dst_path; 4607 struct btrfs_key min_key; 4608 struct btrfs_key max_key; 4609 struct btrfs_root *log = root->log_root; 4610 struct extent_buffer *src = NULL; 4611 LIST_HEAD(logged_list); 4612 u64 last_extent = 0; 4613 int err = 0; 4614 int ret; 4615 int nritems; 4616 int ins_start_slot = 0; 4617 int ins_nr; 4618 bool fast_search = false; 4619 u64 ino = btrfs_ino(inode); 4620 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4621 u64 logged_isize = 0; 4622 bool need_log_inode_item = true; 4623 4624 path = btrfs_alloc_path(); 4625 if (!path) 4626 return -ENOMEM; 4627 dst_path = btrfs_alloc_path(); 4628 if (!dst_path) { 4629 btrfs_free_path(path); 4630 return -ENOMEM; 4631 } 4632 4633 min_key.objectid = ino; 4634 min_key.type = BTRFS_INODE_ITEM_KEY; 4635 min_key.offset = 0; 4636 4637 max_key.objectid = ino; 4638 4639 4640 /* today the code can only do partial logging of directories */ 4641 if (S_ISDIR(inode->i_mode) || 4642 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4643 &BTRFS_I(inode)->runtime_flags) && 4644 inode_only == LOG_INODE_EXISTS)) 4645 max_key.type = BTRFS_XATTR_ITEM_KEY; 4646 else 4647 max_key.type = (u8)-1; 4648 max_key.offset = (u64)-1; 4649 4650 /* 4651 * Only run delayed items if we are a dir or a new file. 4652 * Otherwise commit the delayed inode only, which is needed in 4653 * order for the log replay code to mark inodes for link count 4654 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 4655 */ 4656 if (S_ISDIR(inode->i_mode) || 4657 BTRFS_I(inode)->generation > fs_info->last_trans_committed) 4658 ret = btrfs_commit_inode_delayed_items(trans, inode); 4659 else 4660 ret = btrfs_commit_inode_delayed_inode(inode); 4661 4662 if (ret) { 4663 btrfs_free_path(path); 4664 btrfs_free_path(dst_path); 4665 return ret; 4666 } 4667 4668 mutex_lock(&BTRFS_I(inode)->log_mutex); 4669 4670 /* 4671 * a brute force approach to making sure we get the most uptodate 4672 * copies of everything. 4673 */ 4674 if (S_ISDIR(inode->i_mode)) { 4675 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 4676 4677 if (inode_only == LOG_INODE_EXISTS) 4678 max_key_type = BTRFS_XATTR_ITEM_KEY; 4679 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 4680 } else { 4681 if (inode_only == LOG_INODE_EXISTS) { 4682 /* 4683 * Make sure the new inode item we write to the log has 4684 * the same isize as the current one (if it exists). 
4685 * This is necessary to prevent data loss after log 4686 * replay, and also to prevent doing a wrong expanding 4687 * truncate - for e.g. create file, write 4K into offset 4688 * 0, fsync, write 4K into offset 4096, add hard link, 4689 * fsync some other file (to sync log), power fail - if 4690 * we use the inode's current i_size, after log replay 4691 * we get a 8Kb file, with the last 4Kb extent as a hole 4692 * (zeroes), as if an expanding truncate happened, 4693 * instead of getting a file of 4Kb only. 4694 */ 4695 err = logged_inode_size(log, inode, path, 4696 &logged_isize); 4697 if (err) 4698 goto out_unlock; 4699 } 4700 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4701 &BTRFS_I(inode)->runtime_flags)) { 4702 if (inode_only == LOG_INODE_EXISTS) { 4703 max_key.type = BTRFS_XATTR_ITEM_KEY; 4704 ret = drop_objectid_items(trans, log, path, ino, 4705 max_key.type); 4706 } else { 4707 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4708 &BTRFS_I(inode)->runtime_flags); 4709 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 4710 &BTRFS_I(inode)->runtime_flags); 4711 while(1) { 4712 ret = btrfs_truncate_inode_items(trans, 4713 log, inode, 0, 0); 4714 if (ret != -EAGAIN) 4715 break; 4716 } 4717 } 4718 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 4719 &BTRFS_I(inode)->runtime_flags) || 4720 inode_only == LOG_INODE_EXISTS) { 4721 if (inode_only == LOG_INODE_ALL) 4722 fast_search = true; 4723 max_key.type = BTRFS_XATTR_ITEM_KEY; 4724 ret = drop_objectid_items(trans, log, path, ino, 4725 max_key.type); 4726 } else { 4727 if (inode_only == LOG_INODE_ALL) 4728 fast_search = true; 4729 goto log_extents; 4730 } 4731 4732 } 4733 if (ret) { 4734 err = ret; 4735 goto out_unlock; 4736 } 4737 4738 while (1) { 4739 ins_nr = 0; 4740 ret = btrfs_search_forward(root, &min_key, 4741 path, trans->transid); 4742 if (ret < 0) { 4743 err = ret; 4744 goto out_unlock; 4745 } 4746 if (ret != 0) 4747 break; 4748 again: 4749 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 4750 if (min_key.objectid != ino) 4751 break; 4752 if (min_key.type > max_key.type) 4753 break; 4754 4755 if (min_key.type == BTRFS_INODE_ITEM_KEY) 4756 need_log_inode_item = false; 4757 4758 if ((min_key.type == BTRFS_INODE_REF_KEY || 4759 min_key.type == BTRFS_INODE_EXTREF_KEY) && 4760 BTRFS_I(inode)->generation == trans->transid) { 4761 u64 other_ino = 0; 4762 4763 ret = btrfs_check_ref_name_override(path->nodes[0], 4764 path->slots[0], 4765 &min_key, inode, 4766 &other_ino); 4767 if (ret < 0) { 4768 err = ret; 4769 goto out_unlock; 4770 } else if (ret > 0 && ctx && 4771 other_ino != btrfs_ino(ctx->inode)) { 4772 struct btrfs_key inode_key; 4773 struct inode *other_inode; 4774 4775 if (ins_nr > 0) { 4776 ins_nr++; 4777 } else { 4778 ins_nr = 1; 4779 ins_start_slot = path->slots[0]; 4780 } 4781 ret = copy_items(trans, inode, dst_path, path, 4782 &last_extent, ins_start_slot, 4783 ins_nr, inode_only, 4784 logged_isize); 4785 if (ret < 0) { 4786 err = ret; 4787 goto out_unlock; 4788 } 4789 ins_nr = 0; 4790 btrfs_release_path(path); 4791 inode_key.objectid = other_ino; 4792 inode_key.type = BTRFS_INODE_ITEM_KEY; 4793 inode_key.offset = 0; 4794 other_inode = btrfs_iget(fs_info->sb, 4795 &inode_key, root, 4796 NULL); 4797 /* 4798 * If the other inode that had a conflicting dir 4799 * entry was deleted in the current transaction, 4800 * we don't need to do more work nor fallback to 4801 * a transaction commit. 
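 *
 * One sequence that can lead to this -ENOENT case (file names are
 * illustrative only):
 *
 * touch /mnt/foo
 * sync
 * rm /mnt/foo
 * touch /mnt/foo
 * xfs_io -c fsync /mnt/foo
 *
 * While logging the new foo we find, in the commit root, the old dir
 * entry for the name "foo" pointing to the previous inode, which was
 * unlinked and deleted in the current transaction, so looking it up
 * fails with -ENOENT.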
4802 */ 4803 if (IS_ERR(other_inode) && 4804 PTR_ERR(other_inode) == -ENOENT) { 4805 goto next_key; 4806 } else if (IS_ERR(other_inode)) { 4807 err = PTR_ERR(other_inode); 4808 goto out_unlock; 4809 } 4810 /* 4811 * We are safe logging the other inode without 4812 * acquiring its i_mutex as long as we log with 4813 * the LOG_INODE_EXISTS mode. We're safe against 4814 * concurrent renames of the other inode as well 4815 * because during a rename we pin the log and 4816 * update the log with the new name before we 4817 * unpin it. 4818 */ 4819 err = btrfs_log_inode(trans, root, other_inode, 4820 LOG_INODE_EXISTS, 4821 0, LLONG_MAX, ctx); 4822 iput(other_inode); 4823 if (err) 4824 goto out_unlock; 4825 else 4826 goto next_key; 4827 } 4828 } 4829 4830 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 4831 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 4832 if (ins_nr == 0) 4833 goto next_slot; 4834 ret = copy_items(trans, inode, dst_path, path, 4835 &last_extent, ins_start_slot, 4836 ins_nr, inode_only, logged_isize); 4837 if (ret < 0) { 4838 err = ret; 4839 goto out_unlock; 4840 } 4841 ins_nr = 0; 4842 if (ret) { 4843 btrfs_release_path(path); 4844 continue; 4845 } 4846 goto next_slot; 4847 } 4848 4849 src = path->nodes[0]; 4850 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 4851 ins_nr++; 4852 goto next_slot; 4853 } else if (!ins_nr) { 4854 ins_start_slot = path->slots[0]; 4855 ins_nr = 1; 4856 goto next_slot; 4857 } 4858 4859 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4860 ins_start_slot, ins_nr, inode_only, 4861 logged_isize); 4862 if (ret < 0) { 4863 err = ret; 4864 goto out_unlock; 4865 } 4866 if (ret) { 4867 ins_nr = 0; 4868 btrfs_release_path(path); 4869 continue; 4870 } 4871 ins_nr = 1; 4872 ins_start_slot = path->slots[0]; 4873 next_slot: 4874 4875 nritems = btrfs_header_nritems(path->nodes[0]); 4876 path->slots[0]++; 4877 if (path->slots[0] < nritems) { 4878 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 4879 path->slots[0]); 4880 goto again; 4881 } 4882 if (ins_nr) { 4883 ret = copy_items(trans, inode, dst_path, path, 4884 &last_extent, ins_start_slot, 4885 ins_nr, inode_only, logged_isize); 4886 if (ret < 0) { 4887 err = ret; 4888 goto out_unlock; 4889 } 4890 ret = 0; 4891 ins_nr = 0; 4892 } 4893 btrfs_release_path(path); 4894 next_key: 4895 if (min_key.offset < (u64)-1) { 4896 min_key.offset++; 4897 } else if (min_key.type < max_key.type) { 4898 min_key.type++; 4899 min_key.offset = 0; 4900 } else { 4901 break; 4902 } 4903 } 4904 if (ins_nr) { 4905 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4906 ins_start_slot, ins_nr, inode_only, 4907 logged_isize); 4908 if (ret < 0) { 4909 err = ret; 4910 goto out_unlock; 4911 } 4912 ret = 0; 4913 ins_nr = 0; 4914 } 4915 4916 btrfs_release_path(path); 4917 btrfs_release_path(dst_path); 4918 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 4919 if (err) 4920 goto out_unlock; 4921 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 4922 btrfs_release_path(path); 4923 btrfs_release_path(dst_path); 4924 err = btrfs_log_trailing_hole(trans, root, inode, path); 4925 if (err) 4926 goto out_unlock; 4927 } 4928 log_extents: 4929 btrfs_release_path(path); 4930 btrfs_release_path(dst_path); 4931 if (need_log_inode_item) { 4932 err = log_inode_item(trans, log, dst_path, inode); 4933 if (err) 4934 goto out_unlock; 4935 } 4936 if (fast_search) { 4937 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 4938 &logged_list, ctx, start, end); 4939 if (ret) { 4940 err = 
ret; 4941 goto out_unlock; 4942 } 4943 } else if (inode_only == LOG_INODE_ALL) { 4944 struct extent_map *em, *n; 4945 4946 write_lock(&em_tree->lock); 4947 /* 4948 * We can't just remove every em if we're called for a ranged 4949 * fsync - that is, one that doesn't cover the whole possible 4950 * file range (0 to LLONG_MAX). This is because we can have 4951 * em's that fall outside the range we're logging and therefore 4952 * their ordered operations haven't completed yet 4953 * (btrfs_finish_ordered_io() not invoked yet). This means we 4954 * didn't get their respective file extent item in the fs/subvol 4955 * tree yet, and need to let the next fast fsync (one which 4956 * consults the list of modified extent maps) find the em so 4957 * that it logs a matching file extent item and waits for the 4958 * respective ordered operation to complete (if it's still 4959 * running). 4960 * 4961 * Removing every em outside the range we're logging would make 4962 * the next fast fsync not log their matching file extent items, 4963 * therefore making us lose data after a log replay. 4964 */ 4965 list_for_each_entry_safe(em, n, &em_tree->modified_extents, 4966 list) { 4967 const u64 mod_end = em->mod_start + em->mod_len - 1; 4968 4969 if (em->mod_start >= start && mod_end <= end) 4970 list_del_init(&em->list); 4971 } 4972 write_unlock(&em_tree->lock); 4973 } 4974 4975 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { 4976 ret = log_directory_changes(trans, root, inode, path, dst_path, 4977 ctx); 4978 if (ret) { 4979 err = ret; 4980 goto out_unlock; 4981 } 4982 } 4983 4984 spin_lock(&BTRFS_I(inode)->lock); 4985 BTRFS_I(inode)->logged_trans = trans->transid; 4986 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; 4987 spin_unlock(&BTRFS_I(inode)->lock); 4988 out_unlock: 4989 if (unlikely(err)) 4990 btrfs_put_logged_extents(&logged_list); 4991 else 4992 btrfs_submit_logged_extents(&logged_list, log); 4993 mutex_unlock(&BTRFS_I(inode)->log_mutex); 4994 4995 btrfs_free_path(path); 4996 btrfs_free_path(dst_path); 4997 return err; 4998 } 4999 5000 /* 5001 * Check if we must fallback to a transaction commit when logging an inode. 5002 * This must be called after logging the inode and is used only in the context 5003 * when fsyncing an inode requires the need to log some other inode - in which 5004 * case we can't lock the i_mutex of each other inode we need to log as that 5005 * can lead to deadlocks with concurrent fsync against other inodes (as we can 5006 * log inodes up or down in the hierarchy) or rename operations for example. So 5007 * we take the log_mutex of the inode after we have logged it and then check for 5008 * its last_unlink_trans value - this is safe because any task setting 5009 * last_unlink_trans must take the log_mutex and it must do this before it does 5010 * the actual unlink operation, so if we do this check before a concurrent task 5011 * sets last_unlink_trans it means we've logged a consistent version/state of 5012 * all the inode items, otherwise we are not sure and must do a transaction 5013 * commit (the concurrent task might have only updated last_unlink_trans before 5014 * we logged the inode or it might have also done the unlink). 
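 *
 * In short: after logging inode X, take X's log_mutex, compare X's
 * last_unlink_trans against the last committed transaction and, if it
 * is newer, mark the log for a full commit (btrfs_set_log_full_commit())
 * and return true so the caller falls back to a transaction commit.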
5015 */ 5016 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 5017 struct inode *inode) 5018 { 5019 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 5020 bool ret = false; 5021 5022 mutex_lock(&BTRFS_I(inode)->log_mutex); 5023 if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) { 5024 /* 5025 * Make sure any commits to the log are forced to be full 5026 * commits. 5027 */ 5028 btrfs_set_log_full_commit(fs_info, trans); 5029 ret = true; 5030 } 5031 mutex_unlock(&BTRFS_I(inode)->log_mutex); 5032 5033 return ret; 5034 } 5035 5036 /* 5037 * follow the dentry parent pointers up the chain and see if any 5038 * of the directories in it require a full commit before they can 5039 * be logged. Returns zero if nothing special needs to be done or 1 if 5040 * a full commit is required. 5041 */ 5042 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 5043 struct inode *inode, 5044 struct dentry *parent, 5045 struct super_block *sb, 5046 u64 last_committed) 5047 { 5048 int ret = 0; 5049 struct dentry *old_parent = NULL; 5050 struct inode *orig_inode = inode; 5051 5052 /* 5053 * for regular files, if its inode is already on disk, we don't 5054 * have to worry about the parents at all. This is because 5055 * we can use the last_unlink_trans field to record renames 5056 * and other fun in this file. 5057 */ 5058 if (S_ISREG(inode->i_mode) && 5059 BTRFS_I(inode)->generation <= last_committed && 5060 BTRFS_I(inode)->last_unlink_trans <= last_committed) 5061 goto out; 5062 5063 if (!S_ISDIR(inode->i_mode)) { 5064 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5065 goto out; 5066 inode = d_inode(parent); 5067 } 5068 5069 while (1) { 5070 /* 5071 * If we are logging a directory then we start with our inode, 5072 * not our parent's inode, so we need to skip setting the 5073 * logged_trans so that further down in the log code we don't 5074 * think this inode has already been logged. 5075 */ 5076 if (inode != orig_inode) 5077 BTRFS_I(inode)->logged_trans = trans->transid; 5078 smp_mb(); 5079 5080 if (btrfs_must_commit_transaction(trans, inode)) { 5081 ret = 1; 5082 break; 5083 } 5084 5085 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5086 break; 5087 5088 if (IS_ROOT(parent)) { 5089 inode = d_inode(parent); 5090 if (btrfs_must_commit_transaction(trans, inode)) 5091 ret = 1; 5092 break; 5093 } 5094 5095 parent = dget_parent(parent); 5096 dput(old_parent); 5097 old_parent = parent; 5098 inode = d_inode(parent); 5099 5100 } 5101 dput(old_parent); 5102 out: 5103 return ret; 5104 } 5105 5106 struct btrfs_dir_list { 5107 u64 ino; 5108 struct list_head list; 5109 }; 5110 5111 /* 5112 * Log the inodes of the new dentries of a directory. See log_dir_items() for 5113 * details about the why it is needed. 5114 * This is a recursive operation - if an existing dentry corresponds to a 5115 * directory, that directory's new entries are logged too (same behaviour as 5116 * ext3/4, xfs, f2fs, reiserfs, nilfs2). 
Note that when logging the inodes 5117 * the dentries point to we do not lock their i_mutex, otherwise lockdep 5118 * complains about the following circular lock dependency / possible deadlock: 5119 * 5120 * CPU0 CPU1 5121 * ---- ---- 5122 * lock(&type->i_mutex_dir_key#3/2); 5123 * lock(sb_internal#2); 5124 * lock(&type->i_mutex_dir_key#3/2); 5125 * lock(&sb->s_type->i_mutex_key#14); 5126 * 5127 * Where sb_internal is the lock (a counter that works as a lock) acquired by 5128 * sb_start_intwrite() in btrfs_start_transaction(). 5129 * Not locking i_mutex of the inodes is still safe because: 5130 * 5131 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible 5132 * that while logging the inode new references (names) are added or removed 5133 * from the inode, leaving the logged inode item with a link count that does 5134 * not match the number of logged inode reference items. This is fine because 5135 * at log replay time we compute the real number of links and correct the 5136 * link count in the inode item (see replay_one_buffer() and 5137 * link_to_fixup_dir()); 5138 * 5139 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that 5140 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and 5141 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item 5142 * has a size that doesn't match the sum of the lengths of all the logged 5143 * names. This does not result in a problem because if a dir_item key is 5144 * logged but its matching dir_index key is not logged, at log replay time we 5145 * don't use it to replay the respective name (see replay_one_name()). On the 5146 * other hand if only the dir_index key ends up being logged, the respective 5147 * name is added to the fs/subvol tree with both the dir_item and dir_index 5148 * keys created (see replay_one_name()). 5149 * The directory's inode item with a wrong i_size is not a problem as well, 5150 * since we don't use it at log replay time to set the i_size in the inode 5151 * item of the fs/subvol tree (see overwrite_item()). 
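 *
 * A sequence like the following can end up in this helper (illustrative
 * only):
 *
 * mkdir /mnt/dir
 * touch /mnt/dir/foo
 * xfs_io -c fsync /mnt/dir
 *
 * After logging the directory, this helper walks the dir items that were
 * just logged for it and also logs the inodes they point to, using
 * LOG_INODE_ALL for directories and symlinks and LOG_INODE_EXISTS for
 * everything else and, when logging one of those inodes reports new
 * dentries of its own, queueing it on dir_list so it gets the same
 * treatment.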
5152 */ 5153 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5154 struct btrfs_root *root, 5155 struct inode *start_inode, 5156 struct btrfs_log_ctx *ctx) 5157 { 5158 struct btrfs_fs_info *fs_info = root->fs_info; 5159 struct btrfs_root *log = root->log_root; 5160 struct btrfs_path *path; 5161 LIST_HEAD(dir_list); 5162 struct btrfs_dir_list *dir_elem; 5163 int ret = 0; 5164 5165 path = btrfs_alloc_path(); 5166 if (!path) 5167 return -ENOMEM; 5168 5169 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5170 if (!dir_elem) { 5171 btrfs_free_path(path); 5172 return -ENOMEM; 5173 } 5174 dir_elem->ino = btrfs_ino(start_inode); 5175 list_add_tail(&dir_elem->list, &dir_list); 5176 5177 while (!list_empty(&dir_list)) { 5178 struct extent_buffer *leaf; 5179 struct btrfs_key min_key; 5180 int nritems; 5181 int i; 5182 5183 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, 5184 list); 5185 if (ret) 5186 goto next_dir_inode; 5187 5188 min_key.objectid = dir_elem->ino; 5189 min_key.type = BTRFS_DIR_ITEM_KEY; 5190 min_key.offset = 0; 5191 again: 5192 btrfs_release_path(path); 5193 ret = btrfs_search_forward(log, &min_key, path, trans->transid); 5194 if (ret < 0) { 5195 goto next_dir_inode; 5196 } else if (ret > 0) { 5197 ret = 0; 5198 goto next_dir_inode; 5199 } 5200 5201 process_leaf: 5202 leaf = path->nodes[0]; 5203 nritems = btrfs_header_nritems(leaf); 5204 for (i = path->slots[0]; i < nritems; i++) { 5205 struct btrfs_dir_item *di; 5206 struct btrfs_key di_key; 5207 struct inode *di_inode; 5208 struct btrfs_dir_list *new_dir_elem; 5209 int log_mode = LOG_INODE_EXISTS; 5210 int type; 5211 5212 btrfs_item_key_to_cpu(leaf, &min_key, i); 5213 if (min_key.objectid != dir_elem->ino || 5214 min_key.type != BTRFS_DIR_ITEM_KEY) 5215 goto next_dir_inode; 5216 5217 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); 5218 type = btrfs_dir_type(leaf, di); 5219 if (btrfs_dir_transid(leaf, di) < trans->transid && 5220 type != BTRFS_FT_DIR) 5221 continue; 5222 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5223 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5224 continue; 5225 5226 btrfs_release_path(path); 5227 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL); 5228 if (IS_ERR(di_inode)) { 5229 ret = PTR_ERR(di_inode); 5230 goto next_dir_inode; 5231 } 5232 5233 if (btrfs_inode_in_log(di_inode, trans->transid)) { 5234 iput(di_inode); 5235 break; 5236 } 5237 5238 ctx->log_new_dentries = false; 5239 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 5240 log_mode = LOG_INODE_ALL; 5241 ret = btrfs_log_inode(trans, root, di_inode, 5242 log_mode, 0, LLONG_MAX, ctx); 5243 if (!ret && 5244 btrfs_must_commit_transaction(trans, di_inode)) 5245 ret = 1; 5246 iput(di_inode); 5247 if (ret) 5248 goto next_dir_inode; 5249 if (ctx->log_new_dentries) { 5250 new_dir_elem = kmalloc(sizeof(*new_dir_elem), 5251 GFP_NOFS); 5252 if (!new_dir_elem) { 5253 ret = -ENOMEM; 5254 goto next_dir_inode; 5255 } 5256 new_dir_elem->ino = di_key.objectid; 5257 list_add_tail(&new_dir_elem->list, &dir_list); 5258 } 5259 break; 5260 } 5261 if (i == nritems) { 5262 ret = btrfs_next_leaf(log, path); 5263 if (ret < 0) { 5264 goto next_dir_inode; 5265 } else if (ret > 0) { 5266 ret = 0; 5267 goto next_dir_inode; 5268 } 5269 goto process_leaf; 5270 } 5271 if (min_key.offset < (u64)-1) { 5272 min_key.offset++; 5273 goto again; 5274 } 5275 next_dir_inode: 5276 list_del(&dir_elem->list); 5277 kfree(dir_elem); 5278 } 5279 5280 btrfs_free_path(path); 5281 return ret; 5282 } 5283 5284 static int btrfs_log_all_parents(struct 
btrfs_trans_handle *trans, 5285 struct inode *inode, 5286 struct btrfs_log_ctx *ctx) 5287 { 5288 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5289 int ret; 5290 struct btrfs_path *path; 5291 struct btrfs_key key; 5292 struct btrfs_root *root = BTRFS_I(inode)->root; 5293 const u64 ino = btrfs_ino(inode); 5294 5295 path = btrfs_alloc_path(); 5296 if (!path) 5297 return -ENOMEM; 5298 path->skip_locking = 1; 5299 path->search_commit_root = 1; 5300 5301 key.objectid = ino; 5302 key.type = BTRFS_INODE_REF_KEY; 5303 key.offset = 0; 5304 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5305 if (ret < 0) 5306 goto out; 5307 5308 while (true) { 5309 struct extent_buffer *leaf = path->nodes[0]; 5310 int slot = path->slots[0]; 5311 u32 cur_offset = 0; 5312 u32 item_size; 5313 unsigned long ptr; 5314 5315 if (slot >= btrfs_header_nritems(leaf)) { 5316 ret = btrfs_next_leaf(root, path); 5317 if (ret < 0) 5318 goto out; 5319 else if (ret > 0) 5320 break; 5321 continue; 5322 } 5323 5324 btrfs_item_key_to_cpu(leaf, &key, slot); 5325 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 5326 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 5327 break; 5328 5329 item_size = btrfs_item_size_nr(leaf, slot); 5330 ptr = btrfs_item_ptr_offset(leaf, slot); 5331 while (cur_offset < item_size) { 5332 struct btrfs_key inode_key; 5333 struct inode *dir_inode; 5334 5335 inode_key.type = BTRFS_INODE_ITEM_KEY; 5336 inode_key.offset = 0; 5337 5338 if (key.type == BTRFS_INODE_EXTREF_KEY) { 5339 struct btrfs_inode_extref *extref; 5340 5341 extref = (struct btrfs_inode_extref *) 5342 (ptr + cur_offset); 5343 inode_key.objectid = btrfs_inode_extref_parent( 5344 leaf, extref); 5345 cur_offset += sizeof(*extref); 5346 cur_offset += btrfs_inode_extref_name_len(leaf, 5347 extref); 5348 } else { 5349 inode_key.objectid = key.offset; 5350 cur_offset = item_size; 5351 } 5352 5353 dir_inode = btrfs_iget(fs_info->sb, &inode_key, 5354 root, NULL); 5355 /* If parent inode was deleted, skip it. */ 5356 if (IS_ERR(dir_inode)) 5357 continue; 5358 5359 if (ctx) 5360 ctx->log_new_dentries = false; 5361 ret = btrfs_log_inode(trans, root, dir_inode, 5362 LOG_INODE_ALL, 0, LLONG_MAX, ctx); 5363 if (!ret && 5364 btrfs_must_commit_transaction(trans, dir_inode)) 5365 ret = 1; 5366 if (!ret && ctx && ctx->log_new_dentries) 5367 ret = log_new_dir_dentries(trans, root, 5368 dir_inode, ctx); 5369 iput(dir_inode); 5370 if (ret) 5371 goto out; 5372 } 5373 path->slots[0]++; 5374 } 5375 ret = 0; 5376 out: 5377 btrfs_free_path(path); 5378 return ret; 5379 } 5380 5381 /* 5382 * helper function around btrfs_log_inode to make sure newly created 5383 * parent directories also end up in the log. A minimal inode and backref 5384 * only logging is done of any parent directories that are older than 5385 * the last committed transaction 5386 */ 5387 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 5388 struct btrfs_root *root, struct inode *inode, 5389 struct dentry *parent, 5390 const loff_t start, 5391 const loff_t end, 5392 int exists_only, 5393 struct btrfs_log_ctx *ctx) 5394 { 5395 struct btrfs_fs_info *fs_info = root->fs_info; 5396 int inode_only = exists_only ? 
LOG_INODE_EXISTS : LOG_INODE_ALL; 5397 struct super_block *sb; 5398 struct dentry *old_parent = NULL; 5399 int ret = 0; 5400 u64 last_committed = fs_info->last_trans_committed; 5401 bool log_dentries = false; 5402 struct inode *orig_inode = inode; 5403 5404 sb = inode->i_sb; 5405 5406 if (btrfs_test_opt(fs_info, NOTREELOG)) { 5407 ret = 1; 5408 goto end_no_trans; 5409 } 5410 5411 /* 5412 * If the previous transaction commit didn't complete, we must do a 5413 * full commit ourselves. 5414 */ 5415 if (fs_info->last_trans_log_full_commit > 5416 fs_info->last_trans_committed) { 5417 ret = 1; 5418 goto end_no_trans; 5419 } 5420 5421 if (root != BTRFS_I(inode)->root || 5422 btrfs_root_refs(&root->root_item) == 0) { 5423 ret = 1; 5424 goto end_no_trans; 5425 } 5426 5427 ret = check_parent_dirs_for_sync(trans, inode, parent, 5428 sb, last_committed); 5429 if (ret) 5430 goto end_no_trans; 5431 5432 if (btrfs_inode_in_log(inode, trans->transid)) { 5433 ret = BTRFS_NO_LOG_SYNC; 5434 goto end_no_trans; 5435 } 5436 5437 ret = start_log_trans(trans, root, ctx); 5438 if (ret) 5439 goto end_no_trans; 5440 5441 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); 5442 if (ret) 5443 goto end_trans; 5444 5445 /* 5446 * for regular files, if its inode is already on disk, we don't 5447 * have to worry about the parents at all. This is because 5448 * we can use the last_unlink_trans field to record renames 5449 * and other fun in this file. 5450 */ 5451 if (S_ISREG(inode->i_mode) && 5452 BTRFS_I(inode)->generation <= last_committed && 5453 BTRFS_I(inode)->last_unlink_trans <= last_committed) { 5454 ret = 0; 5455 goto end_trans; 5456 } 5457 5458 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries) 5459 log_dentries = true; 5460 5461 /* 5462 * On unlink we must make sure all our current and old parent directory 5463 * inodes are fully logged. This is to prevent leaving dangling 5464 * directory index entries in directories that were our parents but are 5465 * not anymore. Not doing this results in the old parent directory being 5466 * impossible to delete after log replay (rmdir will always fail with 5467 * error -ENOTEMPTY). 5468 * 5469 * Example 1: 5470 * 5471 * mkdir testdir 5472 * touch testdir/foo 5473 * ln testdir/foo testdir/bar 5474 * sync 5475 * unlink testdir/bar 5476 * xfs_io -c fsync testdir/foo 5477 * <power failure> 5478 * mount fs, triggers log replay 5479 * 5480 * If we don't log the parent directory (testdir), after log replay the 5481 * directory still has an entry pointing to the file inode using the bar 5482 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and 5483 * the file inode has a link count of 1. 5484 * 5485 * Example 2: 5486 * 5487 * mkdir testdir 5488 * touch foo 5489 * ln foo testdir/foo2 5490 * ln foo testdir/foo3 5491 * sync 5492 * unlink testdir/foo3 5493 * xfs_io -c fsync foo 5494 * <power failure> 5495 * mount fs, triggers log replay 5496 * 5497 * Similar to the first example, after log replay the parent directory 5498 * testdir still has an entry pointing to the file inode with name foo3 5499 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item 5500 * and has a link count of 2.
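 *
 * This is why, when the inode's last_unlink_trans is more recent than
 * the last committed transaction, we log every parent directory of the
 * inode just below (btrfs_log_all_parents()).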
5501 */ 5502 if (BTRFS_I(inode)->last_unlink_trans > last_committed) { 5503 ret = btrfs_log_all_parents(trans, orig_inode, ctx); 5504 if (ret) 5505 goto end_trans; 5506 } 5507 5508 while (1) { 5509 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5510 break; 5511 5512 inode = d_inode(parent); 5513 if (root != BTRFS_I(inode)->root) 5514 break; 5515 5516 if (BTRFS_I(inode)->generation > last_committed) { 5517 ret = btrfs_log_inode(trans, root, inode, 5518 LOG_INODE_EXISTS, 5519 0, LLONG_MAX, ctx); 5520 if (ret) 5521 goto end_trans; 5522 } 5523 if (IS_ROOT(parent)) 5524 break; 5525 5526 parent = dget_parent(parent); 5527 dput(old_parent); 5528 old_parent = parent; 5529 } 5530 if (log_dentries) 5531 ret = log_new_dir_dentries(trans, root, orig_inode, ctx); 5532 else 5533 ret = 0; 5534 end_trans: 5535 dput(old_parent); 5536 if (ret < 0) { 5537 btrfs_set_log_full_commit(fs_info, trans); 5538 ret = 1; 5539 } 5540 5541 if (ret) 5542 btrfs_remove_log_ctx(root, ctx); 5543 btrfs_end_log_trans(root); 5544 end_no_trans: 5545 return ret; 5546 } 5547 5548 /* 5549 * it is not safe to log a dentry if the chunk root has added new 5550 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 5551 * If this returns 1, you must commit the transaction to safely get your 5552 * data on disk. 5553 */ 5554 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 5555 struct btrfs_root *root, struct dentry *dentry, 5556 const loff_t start, 5557 const loff_t end, 5558 struct btrfs_log_ctx *ctx) 5559 { 5560 struct dentry *parent = dget_parent(dentry); 5561 int ret; 5562 5563 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent, 5564 start, end, 0, ctx); 5565 dput(parent); 5566 5567 return ret; 5568 } 5569 5570 /* 5571 * should be called during mount to recover and replay any log trees 5572 * from the FS 5573 */ 5574 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) 5575 { 5576 int ret; 5577 struct btrfs_path *path; 5578 struct btrfs_trans_handle *trans; 5579 struct btrfs_key key; 5580 struct btrfs_key found_key; 5581 struct btrfs_key tmp_key; 5582 struct btrfs_root *log; 5583 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 5584 struct walk_control wc = { 5585 .process_func = process_one_buffer, 5586 .stage = 0, 5587 }; 5588 5589 path = btrfs_alloc_path(); 5590 if (!path) 5591 return -ENOMEM; 5592 5593 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 5594 5595 trans = btrfs_start_transaction(fs_info->tree_root, 0); 5596 if (IS_ERR(trans)) { 5597 ret = PTR_ERR(trans); 5598 goto error; 5599 } 5600 5601 wc.trans = trans; 5602 wc.pin = 1; 5603 5604 ret = walk_log_tree(trans, log_root_tree, &wc); 5605 if (ret) { 5606 btrfs_handle_fs_error(fs_info, ret, 5607 "Failed to pin buffers while recovering log root tree."); 5608 goto error; 5609 } 5610 5611 again: 5612 key.objectid = BTRFS_TREE_LOG_OBJECTID; 5613 key.offset = (u64)-1; 5614 key.type = BTRFS_ROOT_ITEM_KEY; 5615 5616 while (1) { 5617 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 5618 5619 if (ret < 0) { 5620 btrfs_handle_fs_error(fs_info, ret, 5621 "Couldn't find tree log root."); 5622 goto error; 5623 } 5624 if (ret > 0) { 5625 if (path->slots[0] == 0) 5626 break; 5627 path->slots[0]--; 5628 } 5629 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 5630 path->slots[0]); 5631 btrfs_release_path(path); 5632 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 5633 break; 5634 5635 log = btrfs_read_fs_root(log_root_tree, &found_key); 5636 if (IS_ERR(log)) { 5637 ret = PTR_ERR(log); 5638
btrfs_handle_fs_error(fs_info, ret, 5639 "Couldn't read tree log root."); 5640 goto error; 5641 } 5642 5643 tmp_key.objectid = found_key.offset; 5644 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 5645 tmp_key.offset = (u64)-1; 5646 5647 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 5648 if (IS_ERR(wc.replay_dest)) { 5649 ret = PTR_ERR(wc.replay_dest); 5650 free_extent_buffer(log->node); 5651 free_extent_buffer(log->commit_root); 5652 kfree(log); 5653 btrfs_handle_fs_error(fs_info, ret, 5654 "Couldn't read target root for tree log recovery."); 5655 goto error; 5656 } 5657 5658 wc.replay_dest->log_root = log; 5659 btrfs_record_root_in_trans(trans, wc.replay_dest); 5660 ret = walk_log_tree(trans, log, &wc); 5661 5662 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { 5663 ret = fixup_inode_link_counts(trans, wc.replay_dest, 5664 path); 5665 } 5666 5667 key.offset = found_key.offset - 1; 5668 wc.replay_dest->log_root = NULL; 5669 free_extent_buffer(log->node); 5670 free_extent_buffer(log->commit_root); 5671 kfree(log); 5672 5673 if (ret) 5674 goto error; 5675 5676 if (found_key.offset == 0) 5677 break; 5678 } 5679 btrfs_release_path(path); 5680 5681 /* step one is to pin it all, step two is to replay just inodes */ 5682 if (wc.pin) { 5683 wc.pin = 0; 5684 wc.process_func = replay_one_buffer; 5685 wc.stage = LOG_WALK_REPLAY_INODES; 5686 goto again; 5687 } 5688 /* step three is to replay everything */ 5689 if (wc.stage < LOG_WALK_REPLAY_ALL) { 5690 wc.stage++; 5691 goto again; 5692 } 5693 5694 btrfs_free_path(path); 5695 5696 /* step 4: commit the transaction, which also unpins the blocks */ 5697 ret = btrfs_commit_transaction(trans); 5698 if (ret) 5699 return ret; 5700 5701 free_extent_buffer(log_root_tree->node); 5702 log_root_tree->log_root = NULL; 5703 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 5704 kfree(log_root_tree); 5705 5706 return 0; 5707 error: 5708 if (wc.trans) 5709 btrfs_end_transaction(wc.trans); 5710 btrfs_free_path(path); 5711 return ret; 5712 } 5713 5714 /* 5715 * there are some corner cases where we want to force a full 5716 * commit instead of allowing a directory to be logged. 5717 * 5718 * They revolve around files that were unlinked from the directory, and 5719 * this function updates the parent directory so that a full commit is 5720 * properly done if it is fsync'd later after the unlinks are done. 5721 * 5722 * Must be called before the unlink operations (updates to the subvolume tree, 5723 * inodes, etc) are done. 5724 */ 5725 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, 5726 struct inode *dir, struct inode *inode, 5727 int for_rename) 5728 { 5729 /* 5730 * when we're logging a file, if it hasn't been renamed 5731 * or unlinked, and its inode is fully committed on disk, 5732 * we don't have to worry about walking up the directory chain 5733 * to log its parents. 5734 * 5735 * So, we use the last_unlink_trans field to put this transid 5736 * into the file. When the file is logged we check it and 5737 * don't log the parents if the file is fully on disk.
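 * (last_unlink_trans is what check_parent_dirs_for_sync() and
 * btrfs_log_inode_parent() later compare against the last committed
 * transaction to decide whether parent directories must be logged or a
 * full transaction commit is needed.)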
5738 */ 5739 mutex_lock(&BTRFS_I(inode)->log_mutex); 5740 BTRFS_I(inode)->last_unlink_trans = trans->transid; 5741 mutex_unlock(&BTRFS_I(inode)->log_mutex); 5742 5743 /* 5744 * if this directory was already logged, any new 5745 * names for this file/dir will get recorded 5746 */ 5747 smp_mb(); 5748 if (BTRFS_I(dir)->logged_trans == trans->transid) 5749 return; 5750 5751 /* 5752 * if the inode we're about to unlink was logged, 5753 * the log will be properly updated for any new names 5754 */ 5755 if (BTRFS_I(inode)->logged_trans == trans->transid) 5756 return; 5757 5758 /* 5759 * when renaming files across directories, if the directory 5760 * we're unlinking from gets fsync'd later on, there's 5761 * no way to find the destination directory later and fsync it 5762 * properly. So, we have to be conservative and force commits 5763 * so the new name gets discovered. 5764 */ 5765 if (for_rename) 5766 goto record; 5767 5768 /* we can safely do the unlink without any special recording */ 5769 return; 5770 5771 record: 5772 mutex_lock(&BTRFS_I(dir)->log_mutex); 5773 BTRFS_I(dir)->last_unlink_trans = trans->transid; 5774 mutex_unlock(&BTRFS_I(dir)->log_mutex); 5775 } 5776 5777 /* 5778 * Make sure that if someone attempts to fsync the parent directory of a deleted 5779 * snapshot, it ends up triggering a transaction commit. This is to guarantee 5780 * that after replaying the log tree of the parent directory's root we will not 5781 * see the snapshot anymore and at log replay time we will not see any log tree 5782 * corresponding to the deleted snapshot's root, which could lead to replaying 5783 * it after replaying the log tree of the parent directory (which would replay 5784 * the snapshot delete operation). 5785 * 5786 * Must be called before the actual snapshot destroy operation (updates to the 5787 * parent root and tree of tree roots trees, etc) are done. 5788 */ 5789 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, 5790 struct inode *dir) 5791 { 5792 mutex_lock(&BTRFS_I(dir)->log_mutex); 5793 BTRFS_I(dir)->last_unlink_trans = trans->transid; 5794 mutex_unlock(&BTRFS_I(dir)->log_mutex); 5795 } 5796 5797 /* 5798 * Call this after adding a new name for a file and it will properly 5799 * update the log to reflect the new name. 5800 * 5801 * It will return zero if all goes well, and it will return 1 if a 5802 * full transaction commit is required. 5803 */ 5804 int btrfs_log_new_name(struct btrfs_trans_handle *trans, 5805 struct inode *inode, struct inode *old_dir, 5806 struct dentry *parent) 5807 { 5808 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5809 struct btrfs_root * root = BTRFS_I(inode)->root; 5810 5811 /* 5812 * this will force the logging code to walk the dentry chain 5813 * up for the file 5814 */ 5815 if (S_ISREG(inode->i_mode)) 5816 BTRFS_I(inode)->last_unlink_trans = trans->transid; 5817 5818 /* 5819 * if this inode hasn't been logged and the directory we're renaming it 5820 * from hasn't been logged, we don't need to log it 5821 */ 5822 if (BTRFS_I(inode)->logged_trans <= 5823 fs_info->last_trans_committed && 5824 (!old_dir || BTRFS_I(old_dir)->logged_trans <= 5825 fs_info->last_trans_committed)) 5826 return 0; 5827 5828 return btrfs_log_inode_parent(trans, root, inode, parent, 0, 5829 LLONG_MAX, 1, NULL); 5830 } 5831 5832