1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2008 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/slab.h> 8 #include <linux/blkdev.h> 9 #include <linux/list_sort.h> 10 #include <linux/iversion.h> 11 #include "misc.h" 12 #include "ctree.h" 13 #include "tree-log.h" 14 #include "disk-io.h" 15 #include "locking.h" 16 #include "print-tree.h" 17 #include "backref.h" 18 #include "compression.h" 19 #include "qgroup.h" 20 #include "inode-map.h" 21 22 /* magic values for the inode_only field in btrfs_log_inode: 23 * 24 * LOG_INODE_ALL means to log everything 25 * LOG_INODE_EXISTS means to log just enough to recreate the inode 26 * during log replay 27 */ 28 enum { 29 LOG_INODE_ALL, 30 LOG_INODE_EXISTS, 31 LOG_OTHER_INODE, 32 LOG_OTHER_INODE_ALL, 33 }; 34 35 /* 36 * directory trouble cases 37 * 38 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync 39 * log, we must force a full commit before doing an fsync of the directory 40 * where the unlink was done. 41 * ---> record transid of last unlink/rename per directory 42 * 43 * mkdir foo/some_dir 44 * normal commit 45 * rename foo/some_dir foo2/some_dir 46 * mkdir foo/some_dir 47 * fsync foo/some_dir/some_file 48 * 49 * The fsync above will unlink the original some_dir without recording 50 * it in its new location (foo2). After a crash, some_dir will be gone 51 * unless the fsync of some_file forces a full commit 52 * 53 * 2) we must log any new names for any file or dir that is in the fsync 54 * log. ---> check inode while renaming/linking. 55 * 56 * 2a) we must log any new names for any file or dir during rename 57 * when the directory they are being removed from was logged. 58 * ---> check inode and old parent dir during rename 59 * 60 * 2a is actually the more important variant. With the extra logging 61 * a crash might unlink the old name without recreating the new one 62 * 63 * 3) after a crash, we must go through any directories with a link count 64 * of zero and redo the rm -rf 65 * 66 * mkdir f1/foo 67 * normal commit 68 * rm -rf f1/foo 69 * fsync(f1) 70 * 71 * The directory f1 was fully removed from the FS, but fsync was never 72 * called on f1, only its parent dir. After a crash the rm -rf must 73 * be replayed. This must be able to recurse down the entire 74 * directory tree. The inode link count fixup code takes care of the 75 * ugly details. 76 */ 77 78 /* 79 * stages for the tree walking. The first 80 * stage (0) is to only pin down the blocks we find 81 * the second stage (1) is to make sure that all the inodes 82 * we find in the log are created in the subvolume. 
83 * 84 * The last stage is to deal with directories and links and extents 85 * and all the other fun semantics 86 */ 87 enum { 88 LOG_WALK_PIN_ONLY, 89 LOG_WALK_REPLAY_INODES, 90 LOG_WALK_REPLAY_DIR_INDEX, 91 LOG_WALK_REPLAY_ALL, 92 }; 93 94 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 95 struct btrfs_root *root, struct btrfs_inode *inode, 96 int inode_only, 97 const loff_t start, 98 const loff_t end, 99 struct btrfs_log_ctx *ctx); 100 static int link_to_fixup_dir(struct btrfs_trans_handle *trans, 101 struct btrfs_root *root, 102 struct btrfs_path *path, u64 objectid); 103 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 104 struct btrfs_root *root, 105 struct btrfs_root *log, 106 struct btrfs_path *path, 107 u64 dirid, int del_all); 108 109 /* 110 * tree logging is a special write ahead log used to make sure that 111 * fsyncs and O_SYNCs can happen without doing full tree commits. 112 * 113 * Full tree commits are expensive because they require commonly 114 * modified blocks to be recowed, creating many dirty pages in the 115 * extent tree an 4x-6x higher write load than ext3. 116 * 117 * Instead of doing a tree commit on every fsync, we use the 118 * key ranges and transaction ids to find items for a given file or directory 119 * that have changed in this transaction. Those items are copied into 120 * a special tree (one per subvolume root), that tree is written to disk 121 * and then the fsync is considered complete. 122 * 123 * After a crash, items are copied out of the log-tree back into the 124 * subvolume tree. Any file data extents found are recorded in the extent 125 * allocation tree, and the log-tree freed. 126 * 127 * The log tree is read three times, once to pin down all the extents it is 128 * using in ram and once, once to create all the inodes logged in the tree 129 * and once to do all the other items. 
130 */ 131 132 /* 133 * start a sub transaction and setup the log tree 134 * this increments the log tree writer count to make the people 135 * syncing the tree wait for us to finish 136 */ 137 static int start_log_trans(struct btrfs_trans_handle *trans, 138 struct btrfs_root *root, 139 struct btrfs_log_ctx *ctx) 140 { 141 struct btrfs_fs_info *fs_info = root->fs_info; 142 int ret = 0; 143 144 mutex_lock(&root->log_mutex); 145 146 if (root->log_root) { 147 if (btrfs_need_log_full_commit(trans)) { 148 ret = -EAGAIN; 149 goto out; 150 } 151 152 if (!root->log_start_pid) { 153 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); 154 root->log_start_pid = current->pid; 155 } else if (root->log_start_pid != current->pid) { 156 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); 157 } 158 } else { 159 mutex_lock(&fs_info->tree_log_mutex); 160 if (!fs_info->log_root_tree) 161 ret = btrfs_init_log_root_tree(trans, fs_info); 162 mutex_unlock(&fs_info->tree_log_mutex); 163 if (ret) 164 goto out; 165 166 ret = btrfs_add_log_tree(trans, root); 167 if (ret) 168 goto out; 169 170 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); 171 root->log_start_pid = current->pid; 172 } 173 174 atomic_inc(&root->log_batch); 175 atomic_inc(&root->log_writers); 176 if (ctx) { 177 int index = root->log_transid % 2; 178 list_add_tail(&ctx->list, &root->log_ctxs[index]); 179 ctx->log_transid = root->log_transid; 180 } 181 182 out: 183 mutex_unlock(&root->log_mutex); 184 return ret; 185 } 186 187 /* 188 * returns 0 if there was a log transaction running and we were able 189 * to join, or returns -ENOENT if there were not transactions 190 * in progress 191 */ 192 static int join_running_log_trans(struct btrfs_root *root) 193 { 194 int ret = -ENOENT; 195 196 mutex_lock(&root->log_mutex); 197 if (root->log_root) { 198 ret = 0; 199 atomic_inc(&root->log_writers); 200 } 201 mutex_unlock(&root->log_mutex); 202 return ret; 203 } 204 205 /* 206 * This either makes the current running log transaction wait 207 * until you call btrfs_end_log_trans() or it makes any future 208 * log transactions wait until you call btrfs_end_log_trans() 209 */ 210 void btrfs_pin_log_trans(struct btrfs_root *root) 211 { 212 mutex_lock(&root->log_mutex); 213 atomic_inc(&root->log_writers); 214 mutex_unlock(&root->log_mutex); 215 } 216 217 /* 218 * indicate we're done making changes to the log tree 219 * and wake up anyone waiting to do a sync 220 */ 221 void btrfs_end_log_trans(struct btrfs_root *root) 222 { 223 if (atomic_dec_and_test(&root->log_writers)) { 224 /* atomic_dec_and_test implies a barrier */ 225 cond_wake_up_nomb(&root->log_writer_wait); 226 } 227 } 228 229 static int btrfs_write_tree_block(struct extent_buffer *buf) 230 { 231 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, 232 buf->start + buf->len - 1); 233 } 234 235 static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf) 236 { 237 filemap_fdatawait_range(buf->pages[0]->mapping, 238 buf->start, buf->start + buf->len - 1); 239 } 240 241 /* 242 * the walk control struct is used to pass state down the chain when 243 * processing the log tree. The stage field tells us which part 244 * of the log tree processing we are currently doing. The others 245 * are state fields used for that specific part 246 */ 247 struct walk_control { 248 /* should we free the extent on disk when done? This is used 249 * at transaction commit time while freeing a log tree 250 */ 251 int free; 252 253 /* should we write out the extent buffer? 
This is used 254 * while flushing the log tree to disk during a sync 255 */ 256 int write; 257 258 /* should we wait for the extent buffer io to finish? Also used 259 * while flushing the log tree to disk for a sync 260 */ 261 int wait; 262 263 /* pin only walk, we record which extents on disk belong to the 264 * log trees 265 */ 266 int pin; 267 268 /* what stage of the replay code we're currently in */ 269 int stage; 270 271 /* 272 * Ignore any items from the inode currently being processed. Needs 273 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in 274 * the LOG_WALK_REPLAY_INODES stage. 275 */ 276 bool ignore_cur_inode; 277 278 /* the root we are currently replaying */ 279 struct btrfs_root *replay_dest; 280 281 /* the trans handle for the current replay */ 282 struct btrfs_trans_handle *trans; 283 284 /* the function that gets used to process blocks we find in the 285 * tree. Note the extent_buffer might not be up to date when it is 286 * passed in, and it must be checked or read if you need the data 287 * inside it 288 */ 289 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, 290 struct walk_control *wc, u64 gen, int level); 291 }; 292 293 /* 294 * process_func used to pin down extents, write them or wait on them 295 */ 296 static int process_one_buffer(struct btrfs_root *log, 297 struct extent_buffer *eb, 298 struct walk_control *wc, u64 gen, int level) 299 { 300 struct btrfs_fs_info *fs_info = log->fs_info; 301 int ret = 0; 302 303 /* 304 * If this fs is mixed then we need to be able to process the leaves to 305 * pin down any logged extents, so we have to read the block. 306 */ 307 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 308 ret = btrfs_read_buffer(eb, gen, level, NULL); 309 if (ret) 310 return ret; 311 } 312 313 if (wc->pin) 314 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start, 315 eb->len); 316 317 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { 318 if (wc->pin && btrfs_header_level(eb) == 0) 319 ret = btrfs_exclude_logged_extents(eb); 320 if (wc->write) 321 btrfs_write_tree_block(eb); 322 if (wc->wait) 323 btrfs_wait_tree_block_writeback(eb); 324 } 325 return ret; 326 } 327 328 /* 329 * Item overwrite used by replay and tree logging. eb, slot and key all refer 330 * to the src data we are copying out. 331 * 332 * root is the tree we are copying into, and path is a scratch 333 * path for use in this function (it should be released on entry and 334 * will be released on exit). 335 * 336 * If the key is already in the destination tree the existing item is 337 * overwritten. If the existing item isn't big enough, it is extended. 338 * If it is too large, it is truncated. 339 * 340 * If the key isn't in the destination yet, a new item is inserted. 
341 */ 342 static noinline int overwrite_item(struct btrfs_trans_handle *trans, 343 struct btrfs_root *root, 344 struct btrfs_path *path, 345 struct extent_buffer *eb, int slot, 346 struct btrfs_key *key) 347 { 348 int ret; 349 u32 item_size; 350 u64 saved_i_size = 0; 351 int save_old_i_size = 0; 352 unsigned long src_ptr; 353 unsigned long dst_ptr; 354 int overwrite_root = 0; 355 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; 356 357 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 358 overwrite_root = 1; 359 360 item_size = btrfs_item_size_nr(eb, slot); 361 src_ptr = btrfs_item_ptr_offset(eb, slot); 362 363 /* look for the key in the destination tree */ 364 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 365 if (ret < 0) 366 return ret; 367 368 if (ret == 0) { 369 char *src_copy; 370 char *dst_copy; 371 u32 dst_size = btrfs_item_size_nr(path->nodes[0], 372 path->slots[0]); 373 if (dst_size != item_size) 374 goto insert; 375 376 if (item_size == 0) { 377 btrfs_release_path(path); 378 return 0; 379 } 380 dst_copy = kmalloc(item_size, GFP_NOFS); 381 src_copy = kmalloc(item_size, GFP_NOFS); 382 if (!dst_copy || !src_copy) { 383 btrfs_release_path(path); 384 kfree(dst_copy); 385 kfree(src_copy); 386 return -ENOMEM; 387 } 388 389 read_extent_buffer(eb, src_copy, src_ptr, item_size); 390 391 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 392 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, 393 item_size); 394 ret = memcmp(dst_copy, src_copy, item_size); 395 396 kfree(dst_copy); 397 kfree(src_copy); 398 /* 399 * they have the same contents, just return, this saves 400 * us from cowing blocks in the destination tree and doing 401 * extra writes that may not have been done by a previous 402 * sync 403 */ 404 if (ret == 0) { 405 btrfs_release_path(path); 406 return 0; 407 } 408 409 /* 410 * We need to load the old nbytes into the inode so when we 411 * replay the extents we've logged we get the right nbytes. 412 */ 413 if (inode_item) { 414 struct btrfs_inode_item *item; 415 u64 nbytes; 416 u32 mode; 417 418 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 419 struct btrfs_inode_item); 420 nbytes = btrfs_inode_nbytes(path->nodes[0], item); 421 item = btrfs_item_ptr(eb, slot, 422 struct btrfs_inode_item); 423 btrfs_set_inode_nbytes(eb, item, nbytes); 424 425 /* 426 * If this is a directory we need to reset the i_size to 427 * 0 so that we can set it up properly when replaying 428 * the rest of the items in this log. 429 */ 430 mode = btrfs_inode_mode(eb, item); 431 if (S_ISDIR(mode)) 432 btrfs_set_inode_size(eb, item, 0); 433 } 434 } else if (inode_item) { 435 struct btrfs_inode_item *item; 436 u32 mode; 437 438 /* 439 * New inode, set nbytes to 0 so that the nbytes comes out 440 * properly when we replay the extents. 441 */ 442 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 443 btrfs_set_inode_nbytes(eb, item, 0); 444 445 /* 446 * If this is a directory we need to reset the i_size to 0 so 447 * that we can set it up properly when replaying the rest of 448 * the items in this log. 
449 */ 450 mode = btrfs_inode_mode(eb, item); 451 if (S_ISDIR(mode)) 452 btrfs_set_inode_size(eb, item, 0); 453 } 454 insert: 455 btrfs_release_path(path); 456 /* try to insert the key into the destination tree */ 457 path->skip_release_on_error = 1; 458 ret = btrfs_insert_empty_item(trans, root, path, 459 key, item_size); 460 path->skip_release_on_error = 0; 461 462 /* make sure any existing item is the correct size */ 463 if (ret == -EEXIST || ret == -EOVERFLOW) { 464 u32 found_size; 465 found_size = btrfs_item_size_nr(path->nodes[0], 466 path->slots[0]); 467 if (found_size > item_size) 468 btrfs_truncate_item(path, item_size, 1); 469 else if (found_size < item_size) 470 btrfs_extend_item(path, item_size - found_size); 471 } else if (ret) { 472 return ret; 473 } 474 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], 475 path->slots[0]); 476 477 /* don't overwrite an existing inode if the generation number 478 * was logged as zero. This is done when the tree logging code 479 * is just logging an inode to make sure it exists after recovery. 480 * 481 * Also, don't overwrite i_size on directories during replay. 482 * log replay inserts and removes directory items based on the 483 * state of the tree found in the subvolume, and i_size is modified 484 * as it goes 485 */ 486 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { 487 struct btrfs_inode_item *src_item; 488 struct btrfs_inode_item *dst_item; 489 490 src_item = (struct btrfs_inode_item *)src_ptr; 491 dst_item = (struct btrfs_inode_item *)dst_ptr; 492 493 if (btrfs_inode_generation(eb, src_item) == 0) { 494 struct extent_buffer *dst_eb = path->nodes[0]; 495 const u64 ino_size = btrfs_inode_size(eb, src_item); 496 497 /* 498 * For regular files an ino_size == 0 is used only when 499 * logging that an inode exists, as part of a directory 500 * fsync, and the inode wasn't fsynced before. In this 501 * case don't set the size of the inode in the fs/subvol 502 * tree, otherwise we would be throwing valid data away. 
503 */ 504 if (S_ISREG(btrfs_inode_mode(eb, src_item)) && 505 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && 506 ino_size != 0) { 507 struct btrfs_map_token token; 508 509 btrfs_init_map_token(&token, dst_eb); 510 btrfs_set_token_inode_size(dst_eb, dst_item, 511 ino_size, &token); 512 } 513 goto no_copy; 514 } 515 516 if (overwrite_root && 517 S_ISDIR(btrfs_inode_mode(eb, src_item)) && 518 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { 519 save_old_i_size = 1; 520 saved_i_size = btrfs_inode_size(path->nodes[0], 521 dst_item); 522 } 523 } 524 525 copy_extent_buffer(path->nodes[0], eb, dst_ptr, 526 src_ptr, item_size); 527 528 if (save_old_i_size) { 529 struct btrfs_inode_item *dst_item; 530 dst_item = (struct btrfs_inode_item *)dst_ptr; 531 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); 532 } 533 534 /* make sure the generation is filled in */ 535 if (key->type == BTRFS_INODE_ITEM_KEY) { 536 struct btrfs_inode_item *dst_item; 537 dst_item = (struct btrfs_inode_item *)dst_ptr; 538 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { 539 btrfs_set_inode_generation(path->nodes[0], dst_item, 540 trans->transid); 541 } 542 } 543 no_copy: 544 btrfs_mark_buffer_dirty(path->nodes[0]); 545 btrfs_release_path(path); 546 return 0; 547 } 548 549 /* 550 * simple helper to read an inode off the disk from a given root 551 * This can only be called for subvolume roots and not for the log 552 */ 553 static noinline struct inode *read_one_inode(struct btrfs_root *root, 554 u64 objectid) 555 { 556 struct btrfs_key key; 557 struct inode *inode; 558 559 key.objectid = objectid; 560 key.type = BTRFS_INODE_ITEM_KEY; 561 key.offset = 0; 562 inode = btrfs_iget(root->fs_info->sb, &key, root); 563 if (IS_ERR(inode)) 564 inode = NULL; 565 return inode; 566 } 567 568 /* replays a single extent in 'eb' at 'slot' with 'key' into the 569 * subvolume 'root'. path is released on entry and should be released 570 * on exit. 571 * 572 * extents in the log tree have not been allocated out of the extent 573 * tree yet. So, this completes the allocation, taking a reference 574 * as required if the extent already exists or creating a new extent 575 * if it isn't in the extent allocation tree yet. 576 * 577 * The extent is inserted into the file, dropping any existing extents 578 * from the file that overlap the new one. 579 */ 580 static noinline int replay_one_extent(struct btrfs_trans_handle *trans, 581 struct btrfs_root *root, 582 struct btrfs_path *path, 583 struct extent_buffer *eb, int slot, 584 struct btrfs_key *key) 585 { 586 struct btrfs_fs_info *fs_info = root->fs_info; 587 int found_type; 588 u64 extent_end; 589 u64 start = key->offset; 590 u64 nbytes = 0; 591 struct btrfs_file_extent_item *item; 592 struct inode *inode = NULL; 593 unsigned long size; 594 int ret = 0; 595 596 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 597 found_type = btrfs_file_extent_type(eb, item); 598 599 if (found_type == BTRFS_FILE_EXTENT_REG || 600 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 601 nbytes = btrfs_file_extent_num_bytes(eb, item); 602 extent_end = start + nbytes; 603 604 /* 605 * We don't add to the inodes nbytes if we are prealloc or a 606 * hole. 
607 */ 608 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 609 nbytes = 0; 610 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 611 size = btrfs_file_extent_ram_bytes(eb, item); 612 nbytes = btrfs_file_extent_ram_bytes(eb, item); 613 extent_end = ALIGN(start + size, 614 fs_info->sectorsize); 615 } else { 616 ret = 0; 617 goto out; 618 } 619 620 inode = read_one_inode(root, key->objectid); 621 if (!inode) { 622 ret = -EIO; 623 goto out; 624 } 625 626 /* 627 * first check to see if we already have this extent in the 628 * file. This must be done before the btrfs_drop_extents run 629 * so we don't try to drop this extent. 630 */ 631 ret = btrfs_lookup_file_extent(trans, root, path, 632 btrfs_ino(BTRFS_I(inode)), start, 0); 633 634 if (ret == 0 && 635 (found_type == BTRFS_FILE_EXTENT_REG || 636 found_type == BTRFS_FILE_EXTENT_PREALLOC)) { 637 struct btrfs_file_extent_item cmp1; 638 struct btrfs_file_extent_item cmp2; 639 struct btrfs_file_extent_item *existing; 640 struct extent_buffer *leaf; 641 642 leaf = path->nodes[0]; 643 existing = btrfs_item_ptr(leaf, path->slots[0], 644 struct btrfs_file_extent_item); 645 646 read_extent_buffer(eb, &cmp1, (unsigned long)item, 647 sizeof(cmp1)); 648 read_extent_buffer(leaf, &cmp2, (unsigned long)existing, 649 sizeof(cmp2)); 650 651 /* 652 * we already have a pointer to this exact extent, 653 * we don't have to do anything 654 */ 655 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { 656 btrfs_release_path(path); 657 goto out; 658 } 659 } 660 btrfs_release_path(path); 661 662 /* drop any overlapping extents */ 663 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); 664 if (ret) 665 goto out; 666 667 if (found_type == BTRFS_FILE_EXTENT_REG || 668 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 669 u64 offset; 670 unsigned long dest_offset; 671 struct btrfs_key ins; 672 673 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && 674 btrfs_fs_incompat(fs_info, NO_HOLES)) 675 goto update_inode; 676 677 ret = btrfs_insert_empty_item(trans, root, path, key, 678 sizeof(*item)); 679 if (ret) 680 goto out; 681 dest_offset = btrfs_item_ptr_offset(path->nodes[0], 682 path->slots[0]); 683 copy_extent_buffer(path->nodes[0], eb, dest_offset, 684 (unsigned long)item, sizeof(*item)); 685 686 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); 687 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); 688 ins.type = BTRFS_EXTENT_ITEM_KEY; 689 offset = key->offset - btrfs_file_extent_offset(eb, item); 690 691 /* 692 * Manually record dirty extent, as here we did a shallow 693 * file extent item copy and skip normal backref update, 694 * but modifying extent tree all by ourselves. 695 * So need to manually record dirty extent for qgroup, 696 * as the owner of the file extent changed from log tree 697 * (doesn't affect qgroup) to fs/file tree(affects qgroup) 698 */ 699 ret = btrfs_qgroup_trace_extent(trans, 700 btrfs_file_extent_disk_bytenr(eb, item), 701 btrfs_file_extent_disk_num_bytes(eb, item), 702 GFP_NOFS); 703 if (ret < 0) 704 goto out; 705 706 if (ins.objectid > 0) { 707 struct btrfs_ref ref = { 0 }; 708 u64 csum_start; 709 u64 csum_end; 710 LIST_HEAD(ordered_sums); 711 712 /* 713 * is this extent already allocated in the extent 714 * allocation tree? 
If so, just add a reference 715 */ 716 ret = btrfs_lookup_data_extent(fs_info, ins.objectid, 717 ins.offset); 718 if (ret == 0) { 719 btrfs_init_generic_ref(&ref, 720 BTRFS_ADD_DELAYED_REF, 721 ins.objectid, ins.offset, 0); 722 btrfs_init_data_ref(&ref, 723 root->root_key.objectid, 724 key->objectid, offset); 725 ret = btrfs_inc_extent_ref(trans, &ref); 726 if (ret) 727 goto out; 728 } else { 729 /* 730 * insert the extent pointer in the extent 731 * allocation tree 732 */ 733 ret = btrfs_alloc_logged_file_extent(trans, 734 root->root_key.objectid, 735 key->objectid, offset, &ins); 736 if (ret) 737 goto out; 738 } 739 btrfs_release_path(path); 740 741 if (btrfs_file_extent_compression(eb, item)) { 742 csum_start = ins.objectid; 743 csum_end = csum_start + ins.offset; 744 } else { 745 csum_start = ins.objectid + 746 btrfs_file_extent_offset(eb, item); 747 csum_end = csum_start + 748 btrfs_file_extent_num_bytes(eb, item); 749 } 750 751 ret = btrfs_lookup_csums_range(root->log_root, 752 csum_start, csum_end - 1, 753 &ordered_sums, 0); 754 if (ret) 755 goto out; 756 /* 757 * Now delete all existing cums in the csum root that 758 * cover our range. We do this because we can have an 759 * extent that is completely referenced by one file 760 * extent item and partially referenced by another 761 * file extent item (like after using the clone or 762 * extent_same ioctls). In this case if we end up doing 763 * the replay of the one that partially references the 764 * extent first, and we do not do the csum deletion 765 * below, we can get 2 csum items in the csum tree that 766 * overlap each other. For example, imagine our log has 767 * the two following file extent items: 768 * 769 * key (257 EXTENT_DATA 409600) 770 * extent data disk byte 12845056 nr 102400 771 * extent data offset 20480 nr 20480 ram 102400 772 * 773 * key (257 EXTENT_DATA 819200) 774 * extent data disk byte 12845056 nr 102400 775 * extent data offset 0 nr 102400 ram 102400 776 * 777 * Where the second one fully references the 100K extent 778 * that starts at disk byte 12845056, and the log tree 779 * has a single csum item that covers the entire range 780 * of the extent: 781 * 782 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100 783 * 784 * After the first file extent item is replayed, the 785 * csum tree gets the following csum item: 786 * 787 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20 788 * 789 * Which covers the 20K sub-range starting at offset 20K 790 * of our extent. Now when we replay the second file 791 * extent item, if we do not delete existing csum items 792 * that cover any of its blocks, we end up getting two 793 * csum items in our csum tree that overlap each other: 794 * 795 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100 796 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20 797 * 798 * Which is a problem, because after this anyone trying 799 * to lookup up for the checksum of any block of our 800 * extent starting at an offset of 40K or higher, will 801 * end up looking at the second csum item only, which 802 * does not contain the checksum for any block starting 803 * at offset 40K or higher of our extent. 
804 */ 805 while (!list_empty(&ordered_sums)) { 806 struct btrfs_ordered_sum *sums; 807 sums = list_entry(ordered_sums.next, 808 struct btrfs_ordered_sum, 809 list); 810 if (!ret) 811 ret = btrfs_del_csums(trans, 812 fs_info->csum_root, 813 sums->bytenr, 814 sums->len); 815 if (!ret) 816 ret = btrfs_csum_file_blocks(trans, 817 fs_info->csum_root, sums); 818 list_del(&sums->list); 819 kfree(sums); 820 } 821 if (ret) 822 goto out; 823 } else { 824 btrfs_release_path(path); 825 } 826 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 827 /* inline extents are easy, we just overwrite them */ 828 ret = overwrite_item(trans, root, path, eb, slot, key); 829 if (ret) 830 goto out; 831 } 832 833 inode_add_bytes(inode, nbytes); 834 update_inode: 835 ret = btrfs_update_inode(trans, root, inode); 836 out: 837 if (inode) 838 iput(inode); 839 return ret; 840 } 841 842 /* 843 * when cleaning up conflicts between the directory names in the 844 * subvolume, directory names in the log and directory names in the 845 * inode back references, we may have to unlink inodes from directories. 846 * 847 * This is a helper function to do the unlink of a specific directory 848 * item 849 */ 850 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, 851 struct btrfs_root *root, 852 struct btrfs_path *path, 853 struct btrfs_inode *dir, 854 struct btrfs_dir_item *di) 855 { 856 struct inode *inode; 857 char *name; 858 int name_len; 859 struct extent_buffer *leaf; 860 struct btrfs_key location; 861 int ret; 862 863 leaf = path->nodes[0]; 864 865 btrfs_dir_item_key_to_cpu(leaf, di, &location); 866 name_len = btrfs_dir_name_len(leaf, di); 867 name = kmalloc(name_len, GFP_NOFS); 868 if (!name) 869 return -ENOMEM; 870 871 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); 872 btrfs_release_path(path); 873 874 inode = read_one_inode(root, location.objectid); 875 if (!inode) { 876 ret = -EIO; 877 goto out; 878 } 879 880 ret = link_to_fixup_dir(trans, root, path, location.objectid); 881 if (ret) 882 goto out; 883 884 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name, 885 name_len); 886 if (ret) 887 goto out; 888 else 889 ret = btrfs_run_delayed_items(trans); 890 out: 891 kfree(name); 892 iput(inode); 893 return ret; 894 } 895 896 /* 897 * helper function to see if a given name and sequence number found 898 * in an inode back reference are already in a directory and correctly 899 * point to this inode 900 */ 901 static noinline int inode_in_dir(struct btrfs_root *root, 902 struct btrfs_path *path, 903 u64 dirid, u64 objectid, u64 index, 904 const char *name, int name_len) 905 { 906 struct btrfs_dir_item *di; 907 struct btrfs_key location; 908 int match = 0; 909 910 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, 911 index, name, name_len, 0); 912 if (di && !IS_ERR(di)) { 913 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 914 if (location.objectid != objectid) 915 goto out; 916 } else 917 goto out; 918 btrfs_release_path(path); 919 920 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); 921 if (di && !IS_ERR(di)) { 922 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 923 if (location.objectid != objectid) 924 goto out; 925 } else 926 goto out; 927 match = 1; 928 out: 929 btrfs_release_path(path); 930 return match; 931 } 932 933 /* 934 * helper function to check a log tree for a named back reference in 935 * an inode. 
This is used to decide if a back reference that is 936 * found in the subvolume conflicts with what we find in the log. 937 * 938 * inode backreferences may have multiple refs in a single item, 939 * during replay we process one reference at a time, and we don't 940 * want to delete valid links to a file from the subvolume if that 941 * link is also in the log. 942 */ 943 static noinline int backref_in_log(struct btrfs_root *log, 944 struct btrfs_key *key, 945 u64 ref_objectid, 946 const char *name, int namelen) 947 { 948 struct btrfs_path *path; 949 int ret; 950 951 path = btrfs_alloc_path(); 952 if (!path) 953 return -ENOMEM; 954 955 ret = btrfs_search_slot(NULL, log, key, path, 0, 0); 956 if (ret < 0) { 957 goto out; 958 } else if (ret == 1) { 959 ret = 0; 960 goto out; 961 } 962 963 if (key->type == BTRFS_INODE_EXTREF_KEY) 964 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], 965 path->slots[0], 966 ref_objectid, 967 name, namelen); 968 else 969 ret = !!btrfs_find_name_in_backref(path->nodes[0], 970 path->slots[0], 971 name, namelen); 972 out: 973 btrfs_free_path(path); 974 return ret; 975 } 976 977 static inline int __add_inode_ref(struct btrfs_trans_handle *trans, 978 struct btrfs_root *root, 979 struct btrfs_path *path, 980 struct btrfs_root *log_root, 981 struct btrfs_inode *dir, 982 struct btrfs_inode *inode, 983 u64 inode_objectid, u64 parent_objectid, 984 u64 ref_index, char *name, int namelen, 985 int *search_done) 986 { 987 int ret; 988 char *victim_name; 989 int victim_name_len; 990 struct extent_buffer *leaf; 991 struct btrfs_dir_item *di; 992 struct btrfs_key search_key; 993 struct btrfs_inode_extref *extref; 994 995 again: 996 /* Search old style refs */ 997 search_key.objectid = inode_objectid; 998 search_key.type = BTRFS_INODE_REF_KEY; 999 search_key.offset = parent_objectid; 1000 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 1001 if (ret == 0) { 1002 struct btrfs_inode_ref *victim_ref; 1003 unsigned long ptr; 1004 unsigned long ptr_end; 1005 1006 leaf = path->nodes[0]; 1007 1008 /* are we trying to overwrite a back ref for the root directory 1009 * if so, just jump out, we're done 1010 */ 1011 if (search_key.objectid == search_key.offset) 1012 return 1; 1013 1014 /* check all the names in this back reference to see 1015 * if they are in the log. 
if so, we allow them to stay 1016 * otherwise they must be unlinked as a conflict 1017 */ 1018 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1019 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); 1020 while (ptr < ptr_end) { 1021 victim_ref = (struct btrfs_inode_ref *)ptr; 1022 victim_name_len = btrfs_inode_ref_name_len(leaf, 1023 victim_ref); 1024 victim_name = kmalloc(victim_name_len, GFP_NOFS); 1025 if (!victim_name) 1026 return -ENOMEM; 1027 1028 read_extent_buffer(leaf, victim_name, 1029 (unsigned long)(victim_ref + 1), 1030 victim_name_len); 1031 1032 ret = backref_in_log(log_root, &search_key, 1033 parent_objectid, victim_name, 1034 victim_name_len); 1035 if (ret < 0) { 1036 kfree(victim_name); 1037 return ret; 1038 } else if (!ret) { 1039 inc_nlink(&inode->vfs_inode); 1040 btrfs_release_path(path); 1041 1042 ret = btrfs_unlink_inode(trans, root, dir, inode, 1043 victim_name, victim_name_len); 1044 kfree(victim_name); 1045 if (ret) 1046 return ret; 1047 ret = btrfs_run_delayed_items(trans); 1048 if (ret) 1049 return ret; 1050 *search_done = 1; 1051 goto again; 1052 } 1053 kfree(victim_name); 1054 1055 ptr = (unsigned long)(victim_ref + 1) + victim_name_len; 1056 } 1057 1058 /* 1059 * NOTE: we have searched root tree and checked the 1060 * corresponding ref, it does not need to check again. 1061 */ 1062 *search_done = 1; 1063 } 1064 btrfs_release_path(path); 1065 1066 /* Same search but for extended refs */ 1067 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, 1068 inode_objectid, parent_objectid, 0, 1069 0); 1070 if (!IS_ERR_OR_NULL(extref)) { 1071 u32 item_size; 1072 u32 cur_offset = 0; 1073 unsigned long base; 1074 struct inode *victim_parent; 1075 1076 leaf = path->nodes[0]; 1077 1078 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1079 base = btrfs_item_ptr_offset(leaf, path->slots[0]); 1080 1081 while (cur_offset < item_size) { 1082 extref = (struct btrfs_inode_extref *)(base + cur_offset); 1083 1084 victim_name_len = btrfs_inode_extref_name_len(leaf, extref); 1085 1086 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) 1087 goto next; 1088 1089 victim_name = kmalloc(victim_name_len, GFP_NOFS); 1090 if (!victim_name) 1091 return -ENOMEM; 1092 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, 1093 victim_name_len); 1094 1095 search_key.objectid = inode_objectid; 1096 search_key.type = BTRFS_INODE_EXTREF_KEY; 1097 search_key.offset = btrfs_extref_hash(parent_objectid, 1098 victim_name, 1099 victim_name_len); 1100 ret = backref_in_log(log_root, &search_key, 1101 parent_objectid, victim_name, 1102 victim_name_len); 1103 if (ret < 0) { 1104 return ret; 1105 } else if (!ret) { 1106 ret = -ENOENT; 1107 victim_parent = read_one_inode(root, 1108 parent_objectid); 1109 if (victim_parent) { 1110 inc_nlink(&inode->vfs_inode); 1111 btrfs_release_path(path); 1112 1113 ret = btrfs_unlink_inode(trans, root, 1114 BTRFS_I(victim_parent), 1115 inode, 1116 victim_name, 1117 victim_name_len); 1118 if (!ret) 1119 ret = btrfs_run_delayed_items( 1120 trans); 1121 } 1122 iput(victim_parent); 1123 kfree(victim_name); 1124 if (ret) 1125 return ret; 1126 *search_done = 1; 1127 goto again; 1128 } 1129 kfree(victim_name); 1130 next: 1131 cur_offset += victim_name_len + sizeof(*extref); 1132 } 1133 *search_done = 1; 1134 } 1135 btrfs_release_path(path); 1136 1137 /* look for a conflicting sequence number */ 1138 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), 1139 ref_index, name, namelen, 0); 1140 if (di && !IS_ERR(di)) 
{ 1141 ret = drop_one_dir_item(trans, root, path, dir, di); 1142 if (ret) 1143 return ret; 1144 } 1145 btrfs_release_path(path); 1146 1147 /* look for a conflicting name */ 1148 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), 1149 name, namelen, 0); 1150 if (di && !IS_ERR(di)) { 1151 ret = drop_one_dir_item(trans, root, path, dir, di); 1152 if (ret) 1153 return ret; 1154 } 1155 btrfs_release_path(path); 1156 1157 return 0; 1158 } 1159 1160 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1161 u32 *namelen, char **name, u64 *index, 1162 u64 *parent_objectid) 1163 { 1164 struct btrfs_inode_extref *extref; 1165 1166 extref = (struct btrfs_inode_extref *)ref_ptr; 1167 1168 *namelen = btrfs_inode_extref_name_len(eb, extref); 1169 *name = kmalloc(*namelen, GFP_NOFS); 1170 if (*name == NULL) 1171 return -ENOMEM; 1172 1173 read_extent_buffer(eb, *name, (unsigned long)&extref->name, 1174 *namelen); 1175 1176 if (index) 1177 *index = btrfs_inode_extref_index(eb, extref); 1178 if (parent_objectid) 1179 *parent_objectid = btrfs_inode_extref_parent(eb, extref); 1180 1181 return 0; 1182 } 1183 1184 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1185 u32 *namelen, char **name, u64 *index) 1186 { 1187 struct btrfs_inode_ref *ref; 1188 1189 ref = (struct btrfs_inode_ref *)ref_ptr; 1190 1191 *namelen = btrfs_inode_ref_name_len(eb, ref); 1192 *name = kmalloc(*namelen, GFP_NOFS); 1193 if (*name == NULL) 1194 return -ENOMEM; 1195 1196 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); 1197 1198 if (index) 1199 *index = btrfs_inode_ref_index(eb, ref); 1200 1201 return 0; 1202 } 1203 1204 /* 1205 * Take an inode reference item from the log tree and iterate all names from the 1206 * inode reference item in the subvolume tree with the same key (if it exists). 1207 * For any name that is not in the inode reference item from the log tree, do a 1208 * proper unlink of that name (that is, remove its entry from the inode 1209 * reference item and both dir index keys). 
1210 */ 1211 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, 1212 struct btrfs_root *root, 1213 struct btrfs_path *path, 1214 struct btrfs_inode *inode, 1215 struct extent_buffer *log_eb, 1216 int log_slot, 1217 struct btrfs_key *key) 1218 { 1219 int ret; 1220 unsigned long ref_ptr; 1221 unsigned long ref_end; 1222 struct extent_buffer *eb; 1223 1224 again: 1225 btrfs_release_path(path); 1226 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 1227 if (ret > 0) { 1228 ret = 0; 1229 goto out; 1230 } 1231 if (ret < 0) 1232 goto out; 1233 1234 eb = path->nodes[0]; 1235 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 1236 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]); 1237 while (ref_ptr < ref_end) { 1238 char *name = NULL; 1239 int namelen; 1240 u64 parent_id; 1241 1242 if (key->type == BTRFS_INODE_EXTREF_KEY) { 1243 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, 1244 NULL, &parent_id); 1245 } else { 1246 parent_id = key->offset; 1247 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, 1248 NULL); 1249 } 1250 if (ret) 1251 goto out; 1252 1253 if (key->type == BTRFS_INODE_EXTREF_KEY) 1254 ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot, 1255 parent_id, name, 1256 namelen); 1257 else 1258 ret = !!btrfs_find_name_in_backref(log_eb, log_slot, 1259 name, namelen); 1260 1261 if (!ret) { 1262 struct inode *dir; 1263 1264 btrfs_release_path(path); 1265 dir = read_one_inode(root, parent_id); 1266 if (!dir) { 1267 ret = -ENOENT; 1268 kfree(name); 1269 goto out; 1270 } 1271 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 1272 inode, name, namelen); 1273 kfree(name); 1274 iput(dir); 1275 if (ret) 1276 goto out; 1277 goto again; 1278 } 1279 1280 kfree(name); 1281 ref_ptr += namelen; 1282 if (key->type == BTRFS_INODE_EXTREF_KEY) 1283 ref_ptr += sizeof(struct btrfs_inode_extref); 1284 else 1285 ref_ptr += sizeof(struct btrfs_inode_ref); 1286 } 1287 ret = 0; 1288 out: 1289 btrfs_release_path(path); 1290 return ret; 1291 } 1292 1293 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir, 1294 const u8 ref_type, const char *name, 1295 const int namelen) 1296 { 1297 struct btrfs_key key; 1298 struct btrfs_path *path; 1299 const u64 parent_id = btrfs_ino(BTRFS_I(dir)); 1300 int ret; 1301 1302 path = btrfs_alloc_path(); 1303 if (!path) 1304 return -ENOMEM; 1305 1306 key.objectid = btrfs_ino(BTRFS_I(inode)); 1307 key.type = ref_type; 1308 if (key.type == BTRFS_INODE_REF_KEY) 1309 key.offset = parent_id; 1310 else 1311 key.offset = btrfs_extref_hash(parent_id, name, namelen); 1312 1313 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0); 1314 if (ret < 0) 1315 goto out; 1316 if (ret > 0) { 1317 ret = 0; 1318 goto out; 1319 } 1320 if (key.type == BTRFS_INODE_EXTREF_KEY) 1321 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], 1322 path->slots[0], parent_id, name, namelen); 1323 else 1324 ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], 1325 name, namelen); 1326 1327 out: 1328 btrfs_free_path(path); 1329 return ret; 1330 } 1331 1332 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1333 struct inode *dir, struct inode *inode, const char *name, 1334 int namelen, u64 ref_index) 1335 { 1336 struct btrfs_dir_item *dir_item; 1337 struct btrfs_key key; 1338 struct btrfs_path *path; 1339 struct inode *other_inode = NULL; 1340 int ret; 1341 1342 path = btrfs_alloc_path(); 1343 if (!path) 1344 return -ENOMEM; 1345 1346 dir_item = btrfs_lookup_dir_item(NULL, root, path, 1347 
btrfs_ino(BTRFS_I(dir)), 1348 name, namelen, 0); 1349 if (!dir_item) { 1350 btrfs_release_path(path); 1351 goto add_link; 1352 } else if (IS_ERR(dir_item)) { 1353 ret = PTR_ERR(dir_item); 1354 goto out; 1355 } 1356 1357 /* 1358 * Our inode's dentry collides with the dentry of another inode which is 1359 * in the log but not yet processed since it has a higher inode number. 1360 * So delete that other dentry. 1361 */ 1362 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key); 1363 btrfs_release_path(path); 1364 other_inode = read_one_inode(root, key.objectid); 1365 if (!other_inode) { 1366 ret = -ENOENT; 1367 goto out; 1368 } 1369 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode), 1370 name, namelen); 1371 if (ret) 1372 goto out; 1373 /* 1374 * If we dropped the link count to 0, bump it so that later the iput() 1375 * on the inode will not free it. We will fixup the link count later. 1376 */ 1377 if (other_inode->i_nlink == 0) 1378 inc_nlink(other_inode); 1379 1380 ret = btrfs_run_delayed_items(trans); 1381 if (ret) 1382 goto out; 1383 add_link: 1384 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 1385 name, namelen, 0, ref_index); 1386 out: 1387 iput(other_inode); 1388 btrfs_free_path(path); 1389 1390 return ret; 1391 } 1392 1393 /* 1394 * replay one inode back reference item found in the log tree. 1395 * eb, slot and key refer to the buffer and key found in the log tree. 1396 * root is the destination we are replaying into, and path is for temp 1397 * use by this function. (it should be released on return). 1398 */ 1399 static noinline int add_inode_ref(struct btrfs_trans_handle *trans, 1400 struct btrfs_root *root, 1401 struct btrfs_root *log, 1402 struct btrfs_path *path, 1403 struct extent_buffer *eb, int slot, 1404 struct btrfs_key *key) 1405 { 1406 struct inode *dir = NULL; 1407 struct inode *inode = NULL; 1408 unsigned long ref_ptr; 1409 unsigned long ref_end; 1410 char *name = NULL; 1411 int namelen; 1412 int ret; 1413 int search_done = 0; 1414 int log_ref_ver = 0; 1415 u64 parent_objectid; 1416 u64 inode_objectid; 1417 u64 ref_index = 0; 1418 int ref_struct_size; 1419 1420 ref_ptr = btrfs_item_ptr_offset(eb, slot); 1421 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 1422 1423 if (key->type == BTRFS_INODE_EXTREF_KEY) { 1424 struct btrfs_inode_extref *r; 1425 1426 ref_struct_size = sizeof(struct btrfs_inode_extref); 1427 log_ref_ver = 1; 1428 r = (struct btrfs_inode_extref *)ref_ptr; 1429 parent_objectid = btrfs_inode_extref_parent(eb, r); 1430 } else { 1431 ref_struct_size = sizeof(struct btrfs_inode_ref); 1432 parent_objectid = key->offset; 1433 } 1434 inode_objectid = key->objectid; 1435 1436 /* 1437 * it is possible that we didn't log all the parent directories 1438 * for a given inode. If we don't find the dir, just don't 1439 * copy the back ref in. The link count fixup code will take 1440 * care of the rest 1441 */ 1442 dir = read_one_inode(root, parent_objectid); 1443 if (!dir) { 1444 ret = -ENOENT; 1445 goto out; 1446 } 1447 1448 inode = read_one_inode(root, inode_objectid); 1449 if (!inode) { 1450 ret = -EIO; 1451 goto out; 1452 } 1453 1454 while (ref_ptr < ref_end) { 1455 if (log_ref_ver) { 1456 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, 1457 &ref_index, &parent_objectid); 1458 /* 1459 * parent object can change from one array 1460 * item to another. 
1461 */ 1462 if (!dir) 1463 dir = read_one_inode(root, parent_objectid); 1464 if (!dir) { 1465 ret = -ENOENT; 1466 goto out; 1467 } 1468 } else { 1469 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, 1470 &ref_index); 1471 } 1472 if (ret) 1473 goto out; 1474 1475 /* if we already have a perfect match, we're done */ 1476 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), 1477 btrfs_ino(BTRFS_I(inode)), ref_index, 1478 name, namelen)) { 1479 /* 1480 * look for a conflicting back reference in the 1481 * metadata. if we find one we have to unlink that name 1482 * of the file before we add our new link. Later on, we 1483 * overwrite any existing back reference, and we don't 1484 * want to create dangling pointers in the directory. 1485 */ 1486 1487 if (!search_done) { 1488 ret = __add_inode_ref(trans, root, path, log, 1489 BTRFS_I(dir), 1490 BTRFS_I(inode), 1491 inode_objectid, 1492 parent_objectid, 1493 ref_index, name, namelen, 1494 &search_done); 1495 if (ret) { 1496 if (ret == 1) 1497 ret = 0; 1498 goto out; 1499 } 1500 } 1501 1502 /* 1503 * If a reference item already exists for this inode 1504 * with the same parent and name, but different index, 1505 * drop it and the corresponding directory index entries 1506 * from the parent before adding the new reference item 1507 * and dir index entries, otherwise we would fail with 1508 * -EEXIST returned from btrfs_add_link() below. 1509 */ 1510 ret = btrfs_inode_ref_exists(inode, dir, key->type, 1511 name, namelen); 1512 if (ret > 0) { 1513 ret = btrfs_unlink_inode(trans, root, 1514 BTRFS_I(dir), 1515 BTRFS_I(inode), 1516 name, namelen); 1517 /* 1518 * If we dropped the link count to 0, bump it so 1519 * that later the iput() on the inode will not 1520 * free it. We will fixup the link count later. 1521 */ 1522 if (!ret && inode->i_nlink == 0) 1523 inc_nlink(inode); 1524 } 1525 if (ret < 0) 1526 goto out; 1527 1528 /* insert our name */ 1529 ret = add_link(trans, root, dir, inode, name, namelen, 1530 ref_index); 1531 if (ret) 1532 goto out; 1533 1534 btrfs_update_inode(trans, root, inode); 1535 } 1536 1537 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; 1538 kfree(name); 1539 name = NULL; 1540 if (log_ref_ver) { 1541 iput(dir); 1542 dir = NULL; 1543 } 1544 } 1545 1546 /* 1547 * Before we overwrite the inode reference item in the subvolume tree 1548 * with the item from the log tree, we must unlink all names from the 1549 * parent directory that are in the subvolume's tree inode reference 1550 * item, otherwise we end up with an inconsistent subvolume tree where 1551 * dir index entries exist for a name but there is no inode reference 1552 * item with the same name. 
1553 */ 1554 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, 1555 key); 1556 if (ret) 1557 goto out; 1558 1559 /* finally write the back reference in the inode */ 1560 ret = overwrite_item(trans, root, path, eb, slot, key); 1561 out: 1562 btrfs_release_path(path); 1563 kfree(name); 1564 iput(dir); 1565 iput(inode); 1566 return ret; 1567 } 1568 1569 static int insert_orphan_item(struct btrfs_trans_handle *trans, 1570 struct btrfs_root *root, u64 ino) 1571 { 1572 int ret; 1573 1574 ret = btrfs_insert_orphan_item(trans, root, ino); 1575 if (ret == -EEXIST) 1576 ret = 0; 1577 1578 return ret; 1579 } 1580 1581 static int count_inode_extrefs(struct btrfs_root *root, 1582 struct btrfs_inode *inode, struct btrfs_path *path) 1583 { 1584 int ret = 0; 1585 int name_len; 1586 unsigned int nlink = 0; 1587 u32 item_size; 1588 u32 cur_offset = 0; 1589 u64 inode_objectid = btrfs_ino(inode); 1590 u64 offset = 0; 1591 unsigned long ptr; 1592 struct btrfs_inode_extref *extref; 1593 struct extent_buffer *leaf; 1594 1595 while (1) { 1596 ret = btrfs_find_one_extref(root, inode_objectid, offset, path, 1597 &extref, &offset); 1598 if (ret) 1599 break; 1600 1601 leaf = path->nodes[0]; 1602 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1603 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1604 cur_offset = 0; 1605 1606 while (cur_offset < item_size) { 1607 extref = (struct btrfs_inode_extref *) (ptr + cur_offset); 1608 name_len = btrfs_inode_extref_name_len(leaf, extref); 1609 1610 nlink++; 1611 1612 cur_offset += name_len + sizeof(*extref); 1613 } 1614 1615 offset++; 1616 btrfs_release_path(path); 1617 } 1618 btrfs_release_path(path); 1619 1620 if (ret < 0 && ret != -ENOENT) 1621 return ret; 1622 return nlink; 1623 } 1624 1625 static int count_inode_refs(struct btrfs_root *root, 1626 struct btrfs_inode *inode, struct btrfs_path *path) 1627 { 1628 int ret; 1629 struct btrfs_key key; 1630 unsigned int nlink = 0; 1631 unsigned long ptr; 1632 unsigned long ptr_end; 1633 int name_len; 1634 u64 ino = btrfs_ino(inode); 1635 1636 key.objectid = ino; 1637 key.type = BTRFS_INODE_REF_KEY; 1638 key.offset = (u64)-1; 1639 1640 while (1) { 1641 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1642 if (ret < 0) 1643 break; 1644 if (ret > 0) { 1645 if (path->slots[0] == 0) 1646 break; 1647 path->slots[0]--; 1648 } 1649 process_slot: 1650 btrfs_item_key_to_cpu(path->nodes[0], &key, 1651 path->slots[0]); 1652 if (key.objectid != ino || 1653 key.type != BTRFS_INODE_REF_KEY) 1654 break; 1655 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 1656 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], 1657 path->slots[0]); 1658 while (ptr < ptr_end) { 1659 struct btrfs_inode_ref *ref; 1660 1661 ref = (struct btrfs_inode_ref *)ptr; 1662 name_len = btrfs_inode_ref_name_len(path->nodes[0], 1663 ref); 1664 ptr = (unsigned long)(ref + 1) + name_len; 1665 nlink++; 1666 } 1667 1668 if (key.offset == 0) 1669 break; 1670 if (path->slots[0] > 0) { 1671 path->slots[0]--; 1672 goto process_slot; 1673 } 1674 key.offset--; 1675 btrfs_release_path(path); 1676 } 1677 btrfs_release_path(path); 1678 1679 return nlink; 1680 } 1681 1682 /* 1683 * There are a few corners where the link count of the file can't 1684 * be properly maintained during replay. So, instead of adding 1685 * lots of complexity to the log code, we just scan the backrefs 1686 * for any file that has been through replay. 1687 * 1688 * The scan will update the link count on the inode to reflect the 1689 * number of back refs found. 
If it goes down to zero, the iput 1690 * will free the inode. 1691 */ 1692 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, 1693 struct btrfs_root *root, 1694 struct inode *inode) 1695 { 1696 struct btrfs_path *path; 1697 int ret; 1698 u64 nlink = 0; 1699 u64 ino = btrfs_ino(BTRFS_I(inode)); 1700 1701 path = btrfs_alloc_path(); 1702 if (!path) 1703 return -ENOMEM; 1704 1705 ret = count_inode_refs(root, BTRFS_I(inode), path); 1706 if (ret < 0) 1707 goto out; 1708 1709 nlink = ret; 1710 1711 ret = count_inode_extrefs(root, BTRFS_I(inode), path); 1712 if (ret < 0) 1713 goto out; 1714 1715 nlink += ret; 1716 1717 ret = 0; 1718 1719 if (nlink != inode->i_nlink) { 1720 set_nlink(inode, nlink); 1721 btrfs_update_inode(trans, root, inode); 1722 } 1723 BTRFS_I(inode)->index_cnt = (u64)-1; 1724 1725 if (inode->i_nlink == 0) { 1726 if (S_ISDIR(inode->i_mode)) { 1727 ret = replay_dir_deletes(trans, root, NULL, path, 1728 ino, 1); 1729 if (ret) 1730 goto out; 1731 } 1732 ret = insert_orphan_item(trans, root, ino); 1733 } 1734 1735 out: 1736 btrfs_free_path(path); 1737 return ret; 1738 } 1739 1740 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, 1741 struct btrfs_root *root, 1742 struct btrfs_path *path) 1743 { 1744 int ret; 1745 struct btrfs_key key; 1746 struct inode *inode; 1747 1748 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1749 key.type = BTRFS_ORPHAN_ITEM_KEY; 1750 key.offset = (u64)-1; 1751 while (1) { 1752 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1753 if (ret < 0) 1754 break; 1755 1756 if (ret == 1) { 1757 if (path->slots[0] == 0) 1758 break; 1759 path->slots[0]--; 1760 } 1761 1762 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1763 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID || 1764 key.type != BTRFS_ORPHAN_ITEM_KEY) 1765 break; 1766 1767 ret = btrfs_del_item(trans, root, path); 1768 if (ret) 1769 goto out; 1770 1771 btrfs_release_path(path); 1772 inode = read_one_inode(root, key.offset); 1773 if (!inode) 1774 return -EIO; 1775 1776 ret = fixup_inode_link_count(trans, root, inode); 1777 iput(inode); 1778 if (ret) 1779 goto out; 1780 1781 /* 1782 * fixup on a directory may create new entries, 1783 * make sure we always look for the highset possible 1784 * offset 1785 */ 1786 key.offset = (u64)-1; 1787 } 1788 ret = 0; 1789 out: 1790 btrfs_release_path(path); 1791 return ret; 1792 } 1793 1794 1795 /* 1796 * record a given inode in the fixup dir so we can check its link 1797 * count when replay is done. 
The link count is incremented here 1798 * so the inode won't go away until we check it 1799 */ 1800 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, 1801 struct btrfs_root *root, 1802 struct btrfs_path *path, 1803 u64 objectid) 1804 { 1805 struct btrfs_key key; 1806 int ret = 0; 1807 struct inode *inode; 1808 1809 inode = read_one_inode(root, objectid); 1810 if (!inode) 1811 return -EIO; 1812 1813 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1814 key.type = BTRFS_ORPHAN_ITEM_KEY; 1815 key.offset = objectid; 1816 1817 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1818 1819 btrfs_release_path(path); 1820 if (ret == 0) { 1821 if (!inode->i_nlink) 1822 set_nlink(inode, 1); 1823 else 1824 inc_nlink(inode); 1825 ret = btrfs_update_inode(trans, root, inode); 1826 } else if (ret == -EEXIST) { 1827 ret = 0; 1828 } else { 1829 BUG(); /* Logic Error */ 1830 } 1831 iput(inode); 1832 1833 return ret; 1834 } 1835 1836 /* 1837 * when replaying the log for a directory, we only insert names 1838 * for inodes that actually exist. This means an fsync on a directory 1839 * does not implicitly fsync all the new files in it 1840 */ 1841 static noinline int insert_one_name(struct btrfs_trans_handle *trans, 1842 struct btrfs_root *root, 1843 u64 dirid, u64 index, 1844 char *name, int name_len, 1845 struct btrfs_key *location) 1846 { 1847 struct inode *inode; 1848 struct inode *dir; 1849 int ret; 1850 1851 inode = read_one_inode(root, location->objectid); 1852 if (!inode) 1853 return -ENOENT; 1854 1855 dir = read_one_inode(root, dirid); 1856 if (!dir) { 1857 iput(inode); 1858 return -EIO; 1859 } 1860 1861 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 1862 name_len, 1, index); 1863 1864 /* FIXME, put inode into FIXUP list */ 1865 1866 iput(inode); 1867 iput(dir); 1868 return ret; 1869 } 1870 1871 /* 1872 * take a single entry in a log directory item and replay it into 1873 * the subvolume. 1874 * 1875 * if a conflicting item exists in the subdirectory already, 1876 * the inode it points to is unlinked and put into the link count 1877 * fix up tree. 1878 * 1879 * If a name from the log points to a file or directory that does 1880 * not exist in the FS, it is skipped. fsyncs on directories 1881 * do not force down inodes inside that directory, just changes to the 1882 * names or unlinks in a directory. 1883 * 1884 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a 1885 * non-existing inode) and 1 if the name was replayed. 
1886 */ 1887 static noinline int replay_one_name(struct btrfs_trans_handle *trans, 1888 struct btrfs_root *root, 1889 struct btrfs_path *path, 1890 struct extent_buffer *eb, 1891 struct btrfs_dir_item *di, 1892 struct btrfs_key *key) 1893 { 1894 char *name; 1895 int name_len; 1896 struct btrfs_dir_item *dst_di; 1897 struct btrfs_key found_key; 1898 struct btrfs_key log_key; 1899 struct inode *dir; 1900 u8 log_type; 1901 int exists; 1902 int ret = 0; 1903 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY); 1904 bool name_added = false; 1905 1906 dir = read_one_inode(root, key->objectid); 1907 if (!dir) 1908 return -EIO; 1909 1910 name_len = btrfs_dir_name_len(eb, di); 1911 name = kmalloc(name_len, GFP_NOFS); 1912 if (!name) { 1913 ret = -ENOMEM; 1914 goto out; 1915 } 1916 1917 log_type = btrfs_dir_type(eb, di); 1918 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1919 name_len); 1920 1921 btrfs_dir_item_key_to_cpu(eb, di, &log_key); 1922 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); 1923 if (exists == 0) 1924 exists = 1; 1925 else 1926 exists = 0; 1927 btrfs_release_path(path); 1928 1929 if (key->type == BTRFS_DIR_ITEM_KEY) { 1930 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1931 name, name_len, 1); 1932 } else if (key->type == BTRFS_DIR_INDEX_KEY) { 1933 dst_di = btrfs_lookup_dir_index_item(trans, root, path, 1934 key->objectid, 1935 key->offset, name, 1936 name_len, 1); 1937 } else { 1938 /* Corruption */ 1939 ret = -EINVAL; 1940 goto out; 1941 } 1942 if (IS_ERR_OR_NULL(dst_di)) { 1943 /* we need a sequence number to insert, so we only 1944 * do inserts for the BTRFS_DIR_INDEX_KEY types 1945 */ 1946 if (key->type != BTRFS_DIR_INDEX_KEY) 1947 goto out; 1948 goto insert; 1949 } 1950 1951 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); 1952 /* the existing item matches the logged item */ 1953 if (found_key.objectid == log_key.objectid && 1954 found_key.type == log_key.type && 1955 found_key.offset == log_key.offset && 1956 btrfs_dir_type(path->nodes[0], dst_di) == log_type) { 1957 update_size = false; 1958 goto out; 1959 } 1960 1961 /* 1962 * don't drop the conflicting directory entry if the inode 1963 * for the new entry doesn't exist 1964 */ 1965 if (!exists) 1966 goto out; 1967 1968 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di); 1969 if (ret) 1970 goto out; 1971 1972 if (key->type == BTRFS_DIR_INDEX_KEY) 1973 goto insert; 1974 out: 1975 btrfs_release_path(path); 1976 if (!ret && update_size) { 1977 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); 1978 ret = btrfs_update_inode(trans, root, dir); 1979 } 1980 kfree(name); 1981 iput(dir); 1982 if (!ret && name_added) 1983 ret = 1; 1984 return ret; 1985 1986 insert: 1987 /* 1988 * Check if the inode reference exists in the log for the given name, 1989 * inode and parent inode 1990 */ 1991 found_key.objectid = log_key.objectid; 1992 found_key.type = BTRFS_INODE_REF_KEY; 1993 found_key.offset = key->objectid; 1994 ret = backref_in_log(root->log_root, &found_key, 0, name, name_len); 1995 if (ret < 0) { 1996 goto out; 1997 } else if (ret) { 1998 /* The dentry will be added later. 
*/ 1999 ret = 0; 2000 update_size = false; 2001 goto out; 2002 } 2003 2004 found_key.objectid = log_key.objectid; 2005 found_key.type = BTRFS_INODE_EXTREF_KEY; 2006 found_key.offset = key->objectid; 2007 ret = backref_in_log(root->log_root, &found_key, key->objectid, name, 2008 name_len); 2009 if (ret < 0) { 2010 goto out; 2011 } else if (ret) { 2012 /* The dentry will be added later. */ 2013 ret = 0; 2014 update_size = false; 2015 goto out; 2016 } 2017 btrfs_release_path(path); 2018 ret = insert_one_name(trans, root, key->objectid, key->offset, 2019 name, name_len, &log_key); 2020 if (ret && ret != -ENOENT && ret != -EEXIST) 2021 goto out; 2022 if (!ret) 2023 name_added = true; 2024 update_size = false; 2025 ret = 0; 2026 goto out; 2027 } 2028 2029 /* 2030 * find all the names in a directory item and reconcile them into 2031 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than 2032 * one name in a directory item, but the same code gets used for 2033 * both directory index types 2034 */ 2035 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, 2036 struct btrfs_root *root, 2037 struct btrfs_path *path, 2038 struct extent_buffer *eb, int slot, 2039 struct btrfs_key *key) 2040 { 2041 int ret = 0; 2042 u32 item_size = btrfs_item_size_nr(eb, slot); 2043 struct btrfs_dir_item *di; 2044 int name_len; 2045 unsigned long ptr; 2046 unsigned long ptr_end; 2047 struct btrfs_path *fixup_path = NULL; 2048 2049 ptr = btrfs_item_ptr_offset(eb, slot); 2050 ptr_end = ptr + item_size; 2051 while (ptr < ptr_end) { 2052 di = (struct btrfs_dir_item *)ptr; 2053 name_len = btrfs_dir_name_len(eb, di); 2054 ret = replay_one_name(trans, root, path, eb, di, key); 2055 if (ret < 0) 2056 break; 2057 ptr = (unsigned long)(di + 1); 2058 ptr += name_len; 2059 2060 /* 2061 * If this entry refers to a non-directory (directories can not 2062 * have a link count > 1) and it was added in the transaction 2063 * that was not committed, make sure we fixup the link count of 2064 * the inode it the entry points to. Otherwise something like 2065 * the following would result in a directory pointing to an 2066 * inode with a wrong link that does not account for this dir 2067 * entry: 2068 * 2069 * mkdir testdir 2070 * touch testdir/foo 2071 * touch testdir/bar 2072 * sync 2073 * 2074 * ln testdir/bar testdir/bar_link 2075 * ln testdir/foo testdir/foo_link 2076 * xfs_io -c "fsync" testdir/bar 2077 * 2078 * <power failure> 2079 * 2080 * mount fs, log replay happens 2081 * 2082 * File foo would remain with a link count of 1 when it has two 2083 * entries pointing to it in the directory testdir. This would 2084 * make it impossible to ever delete the parent directory has 2085 * it would result in stale dentries that can never be deleted. 2086 */ 2087 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) { 2088 struct btrfs_key di_key; 2089 2090 if (!fixup_path) { 2091 fixup_path = btrfs_alloc_path(); 2092 if (!fixup_path) { 2093 ret = -ENOMEM; 2094 break; 2095 } 2096 } 2097 2098 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2099 ret = link_to_fixup_dir(trans, root, fixup_path, 2100 di_key.objectid); 2101 if (ret) 2102 break; 2103 } 2104 ret = 0; 2105 } 2106 btrfs_free_path(fixup_path); 2107 return ret; 2108 } 2109 2110 /* 2111 * directory replay has two parts. There are the standard directory 2112 * items in the log copied from the subvolume, and range items 2113 * created in the log while the subvolume was logged. 
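 * (A range item is keyed as (dirid, BTRFS_DIR_LOG_ITEM_KEY or
 * BTRFS_DIR_LOG_INDEX_KEY, first_offset), and its btrfs_dir_log_item payload
 * records the last offset covered - see insert_dir_log_key() further down.)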
2114 * 2115 * The range items tell us which parts of the key space the log 2116 * is authoritative for. During replay, if a key in the subvolume 2117 * directory is in a logged range item, but not actually in the log 2118 * that means it was deleted from the directory before the fsync 2119 * and should be removed. 2120 */ 2121 static noinline int find_dir_range(struct btrfs_root *root, 2122 struct btrfs_path *path, 2123 u64 dirid, int key_type, 2124 u64 *start_ret, u64 *end_ret) 2125 { 2126 struct btrfs_key key; 2127 u64 found_end; 2128 struct btrfs_dir_log_item *item; 2129 int ret; 2130 int nritems; 2131 2132 if (*start_ret == (u64)-1) 2133 return 1; 2134 2135 key.objectid = dirid; 2136 key.type = key_type; 2137 key.offset = *start_ret; 2138 2139 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2140 if (ret < 0) 2141 goto out; 2142 if (ret > 0) { 2143 if (path->slots[0] == 0) 2144 goto out; 2145 path->slots[0]--; 2146 } 2147 if (ret != 0) 2148 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2149 2150 if (key.type != key_type || key.objectid != dirid) { 2151 ret = 1; 2152 goto next; 2153 } 2154 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2155 struct btrfs_dir_log_item); 2156 found_end = btrfs_dir_log_end(path->nodes[0], item); 2157 2158 if (*start_ret >= key.offset && *start_ret <= found_end) { 2159 ret = 0; 2160 *start_ret = key.offset; 2161 *end_ret = found_end; 2162 goto out; 2163 } 2164 ret = 1; 2165 next: 2166 /* check the next slot in the tree to see if it is a valid item */ 2167 nritems = btrfs_header_nritems(path->nodes[0]); 2168 path->slots[0]++; 2169 if (path->slots[0] >= nritems) { 2170 ret = btrfs_next_leaf(root, path); 2171 if (ret) 2172 goto out; 2173 } 2174 2175 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2176 2177 if (key.type != key_type || key.objectid != dirid) { 2178 ret = 1; 2179 goto out; 2180 } 2181 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2182 struct btrfs_dir_log_item); 2183 found_end = btrfs_dir_log_end(path->nodes[0], item); 2184 *start_ret = key.offset; 2185 *end_ret = found_end; 2186 ret = 0; 2187 out: 2188 btrfs_release_path(path); 2189 return ret; 2190 } 2191 2192 /* 2193 * this looks for a given directory item in the log. 
If the directory 2194 * item is not in the log, the item is removed and the inode it points 2195 * to is unlinked 2196 */ 2197 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 2198 struct btrfs_root *root, 2199 struct btrfs_root *log, 2200 struct btrfs_path *path, 2201 struct btrfs_path *log_path, 2202 struct inode *dir, 2203 struct btrfs_key *dir_key) 2204 { 2205 int ret; 2206 struct extent_buffer *eb; 2207 int slot; 2208 u32 item_size; 2209 struct btrfs_dir_item *di; 2210 struct btrfs_dir_item *log_di; 2211 int name_len; 2212 unsigned long ptr; 2213 unsigned long ptr_end; 2214 char *name; 2215 struct inode *inode; 2216 struct btrfs_key location; 2217 2218 again: 2219 eb = path->nodes[0]; 2220 slot = path->slots[0]; 2221 item_size = btrfs_item_size_nr(eb, slot); 2222 ptr = btrfs_item_ptr_offset(eb, slot); 2223 ptr_end = ptr + item_size; 2224 while (ptr < ptr_end) { 2225 di = (struct btrfs_dir_item *)ptr; 2226 name_len = btrfs_dir_name_len(eb, di); 2227 name = kmalloc(name_len, GFP_NOFS); 2228 if (!name) { 2229 ret = -ENOMEM; 2230 goto out; 2231 } 2232 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2233 name_len); 2234 log_di = NULL; 2235 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2236 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2237 dir_key->objectid, 2238 name, name_len, 0); 2239 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2240 log_di = btrfs_lookup_dir_index_item(trans, log, 2241 log_path, 2242 dir_key->objectid, 2243 dir_key->offset, 2244 name, name_len, 0); 2245 } 2246 if (!log_di || log_di == ERR_PTR(-ENOENT)) { 2247 btrfs_dir_item_key_to_cpu(eb, di, &location); 2248 btrfs_release_path(path); 2249 btrfs_release_path(log_path); 2250 inode = read_one_inode(root, location.objectid); 2251 if (!inode) { 2252 kfree(name); 2253 return -EIO; 2254 } 2255 2256 ret = link_to_fixup_dir(trans, root, 2257 path, location.objectid); 2258 if (ret) { 2259 kfree(name); 2260 iput(inode); 2261 goto out; 2262 } 2263 2264 inc_nlink(inode); 2265 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 2266 BTRFS_I(inode), name, name_len); 2267 if (!ret) 2268 ret = btrfs_run_delayed_items(trans); 2269 kfree(name); 2270 iput(inode); 2271 if (ret) 2272 goto out; 2273 2274 /* there might still be more names under this key 2275 * check and repeat if required 2276 */ 2277 ret = btrfs_search_slot(NULL, root, dir_key, path, 2278 0, 0); 2279 if (ret == 0) 2280 goto again; 2281 ret = 0; 2282 goto out; 2283 } else if (IS_ERR(log_di)) { 2284 kfree(name); 2285 return PTR_ERR(log_di); 2286 } 2287 btrfs_release_path(log_path); 2288 kfree(name); 2289 2290 ptr = (unsigned long)(di + 1); 2291 ptr += name_len; 2292 } 2293 ret = 0; 2294 out: 2295 btrfs_release_path(path); 2296 btrfs_release_path(log_path); 2297 return ret; 2298 } 2299 2300 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2301 struct btrfs_root *root, 2302 struct btrfs_root *log, 2303 struct btrfs_path *path, 2304 const u64 ino) 2305 { 2306 struct btrfs_key search_key; 2307 struct btrfs_path *log_path; 2308 int i; 2309 int nritems; 2310 int ret; 2311 2312 log_path = btrfs_alloc_path(); 2313 if (!log_path) 2314 return -ENOMEM; 2315 2316 search_key.objectid = ino; 2317 search_key.type = BTRFS_XATTR_ITEM_KEY; 2318 search_key.offset = 0; 2319 again: 2320 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2321 if (ret < 0) 2322 goto out; 2323 process_leaf: 2324 nritems = btrfs_header_nritems(path->nodes[0]); 2325 for (i = path->slots[0]; i < nritems; i++) { 2326 struct btrfs_key key; 
2327 struct btrfs_dir_item *di; 2328 struct btrfs_dir_item *log_di; 2329 u32 total_size; 2330 u32 cur; 2331 2332 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2333 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2334 ret = 0; 2335 goto out; 2336 } 2337 2338 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2339 total_size = btrfs_item_size_nr(path->nodes[0], i); 2340 cur = 0; 2341 while (cur < total_size) { 2342 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2343 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2344 u32 this_len = sizeof(*di) + name_len + data_len; 2345 char *name; 2346 2347 name = kmalloc(name_len, GFP_NOFS); 2348 if (!name) { 2349 ret = -ENOMEM; 2350 goto out; 2351 } 2352 read_extent_buffer(path->nodes[0], name, 2353 (unsigned long)(di + 1), name_len); 2354 2355 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2356 name, name_len, 0); 2357 btrfs_release_path(log_path); 2358 if (!log_di) { 2359 /* Doesn't exist in log tree, so delete it. */ 2360 btrfs_release_path(path); 2361 di = btrfs_lookup_xattr(trans, root, path, ino, 2362 name, name_len, -1); 2363 kfree(name); 2364 if (IS_ERR(di)) { 2365 ret = PTR_ERR(di); 2366 goto out; 2367 } 2368 ASSERT(di); 2369 ret = btrfs_delete_one_dir_name(trans, root, 2370 path, di); 2371 if (ret) 2372 goto out; 2373 btrfs_release_path(path); 2374 search_key = key; 2375 goto again; 2376 } 2377 kfree(name); 2378 if (IS_ERR(log_di)) { 2379 ret = PTR_ERR(log_di); 2380 goto out; 2381 } 2382 cur += this_len; 2383 di = (struct btrfs_dir_item *)((char *)di + this_len); 2384 } 2385 } 2386 ret = btrfs_next_leaf(root, path); 2387 if (ret > 0) 2388 ret = 0; 2389 else if (ret == 0) 2390 goto process_leaf; 2391 out: 2392 btrfs_free_path(log_path); 2393 btrfs_release_path(path); 2394 return ret; 2395 } 2396 2397 2398 /* 2399 * deletion replay happens before we copy any new directory items 2400 * out of the log or out of backreferences from inodes. It 2401 * scans the log to find ranges of keys that log is authoritative for, 2402 * and then scans the directory to find items in those ranges that are 2403 * not present in the log. 2404 * 2405 * Anything we don't find in the log is unlinked and removed from the 2406 * directory. 
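 *
 * (The del_all flag is the exception: it is used by the link count fixup
 * code when a whole directory has to be emptied, and in that case the entire
 * key space is treated as logged - see the range_end = (u64)-1 shortcut
 * below.)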
2407 */ 2408 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2409 struct btrfs_root *root, 2410 struct btrfs_root *log, 2411 struct btrfs_path *path, 2412 u64 dirid, int del_all) 2413 { 2414 u64 range_start; 2415 u64 range_end; 2416 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2417 int ret = 0; 2418 struct btrfs_key dir_key; 2419 struct btrfs_key found_key; 2420 struct btrfs_path *log_path; 2421 struct inode *dir; 2422 2423 dir_key.objectid = dirid; 2424 dir_key.type = BTRFS_DIR_ITEM_KEY; 2425 log_path = btrfs_alloc_path(); 2426 if (!log_path) 2427 return -ENOMEM; 2428 2429 dir = read_one_inode(root, dirid); 2430 /* it isn't an error if the inode isn't there, that can happen 2431 * because we replay the deletes before we copy in the inode item 2432 * from the log 2433 */ 2434 if (!dir) { 2435 btrfs_free_path(log_path); 2436 return 0; 2437 } 2438 again: 2439 range_start = 0; 2440 range_end = 0; 2441 while (1) { 2442 if (del_all) 2443 range_end = (u64)-1; 2444 else { 2445 ret = find_dir_range(log, path, dirid, key_type, 2446 &range_start, &range_end); 2447 if (ret != 0) 2448 break; 2449 } 2450 2451 dir_key.offset = range_start; 2452 while (1) { 2453 int nritems; 2454 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2455 0, 0); 2456 if (ret < 0) 2457 goto out; 2458 2459 nritems = btrfs_header_nritems(path->nodes[0]); 2460 if (path->slots[0] >= nritems) { 2461 ret = btrfs_next_leaf(root, path); 2462 if (ret == 1) 2463 break; 2464 else if (ret < 0) 2465 goto out; 2466 } 2467 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2468 path->slots[0]); 2469 if (found_key.objectid != dirid || 2470 found_key.type != dir_key.type) 2471 goto next_type; 2472 2473 if (found_key.offset > range_end) 2474 break; 2475 2476 ret = check_item_in_log(trans, root, log, path, 2477 log_path, dir, 2478 &found_key); 2479 if (ret) 2480 goto out; 2481 if (found_key.offset == (u64)-1) 2482 break; 2483 dir_key.offset = found_key.offset + 1; 2484 } 2485 btrfs_release_path(path); 2486 if (range_end == (u64)-1) 2487 break; 2488 range_start = range_end + 1; 2489 } 2490 2491 next_type: 2492 ret = 0; 2493 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2494 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2495 dir_key.type = BTRFS_DIR_INDEX_KEY; 2496 btrfs_release_path(path); 2497 goto again; 2498 } 2499 out: 2500 btrfs_release_path(path); 2501 btrfs_free_path(log_path); 2502 iput(dir); 2503 return ret; 2504 } 2505 2506 /* 2507 * the process_func used to replay items from the log tree. This 2508 * gets called in two different stages. The first stage just looks 2509 * for inodes and makes sure they are all copied into the subvolume. 2510 * 2511 * The second stage copies all the other item types from the log into 2512 * the subvolume. The two stage approach is slower, but gets rid of 2513 * lots of complexity around inodes referencing other inodes that exist 2514 * only in the log (references come from either directory items or inode 2515 * back refs). 
2516 */ 2517 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2518 struct walk_control *wc, u64 gen, int level) 2519 { 2520 int nritems; 2521 struct btrfs_path *path; 2522 struct btrfs_root *root = wc->replay_dest; 2523 struct btrfs_key key; 2524 int i; 2525 int ret; 2526 2527 ret = btrfs_read_buffer(eb, gen, level, NULL); 2528 if (ret) 2529 return ret; 2530 2531 level = btrfs_header_level(eb); 2532 2533 if (level != 0) 2534 return 0; 2535 2536 path = btrfs_alloc_path(); 2537 if (!path) 2538 return -ENOMEM; 2539 2540 nritems = btrfs_header_nritems(eb); 2541 for (i = 0; i < nritems; i++) { 2542 btrfs_item_key_to_cpu(eb, &key, i); 2543 2544 /* inode keys are done during the first stage */ 2545 if (key.type == BTRFS_INODE_ITEM_KEY && 2546 wc->stage == LOG_WALK_REPLAY_INODES) { 2547 struct btrfs_inode_item *inode_item; 2548 u32 mode; 2549 2550 inode_item = btrfs_item_ptr(eb, i, 2551 struct btrfs_inode_item); 2552 /* 2553 * If we have a tmpfile (O_TMPFILE) that got fsync'ed 2554 * and never got linked before the fsync, skip it, as 2555 * replaying it is pointless since it would be deleted 2556 * later. We skip logging tmpfiles, but it's always 2557 * possible we are replaying a log created with a kernel 2558 * that used to log tmpfiles. 2559 */ 2560 if (btrfs_inode_nlink(eb, inode_item) == 0) { 2561 wc->ignore_cur_inode = true; 2562 continue; 2563 } else { 2564 wc->ignore_cur_inode = false; 2565 } 2566 ret = replay_xattr_deletes(wc->trans, root, log, 2567 path, key.objectid); 2568 if (ret) 2569 break; 2570 mode = btrfs_inode_mode(eb, inode_item); 2571 if (S_ISDIR(mode)) { 2572 ret = replay_dir_deletes(wc->trans, 2573 root, log, path, key.objectid, 0); 2574 if (ret) 2575 break; 2576 } 2577 ret = overwrite_item(wc->trans, root, path, 2578 eb, i, &key); 2579 if (ret) 2580 break; 2581 2582 /* 2583 * Before replaying extents, truncate the inode to its 2584 * size. We need to do it now and not after log replay 2585 * because before an fsync we can have prealloc extents 2586 * added beyond the inode's i_size. If we did it after, 2587 * through orphan cleanup for example, we would drop 2588 * those prealloc extents just after replaying them. 2589 */ 2590 if (S_ISREG(mode)) { 2591 struct inode *inode; 2592 u64 from; 2593 2594 inode = read_one_inode(root, key.objectid); 2595 if (!inode) { 2596 ret = -EIO; 2597 break; 2598 } 2599 from = ALIGN(i_size_read(inode), 2600 root->fs_info->sectorsize); 2601 ret = btrfs_drop_extents(wc->trans, root, inode, 2602 from, (u64)-1, 1); 2603 if (!ret) { 2604 /* Update the inode's nbytes. 
*/ 2605 ret = btrfs_update_inode(wc->trans, 2606 root, inode); 2607 } 2608 iput(inode); 2609 if (ret) 2610 break; 2611 } 2612 2613 ret = link_to_fixup_dir(wc->trans, root, 2614 path, key.objectid); 2615 if (ret) 2616 break; 2617 } 2618 2619 if (wc->ignore_cur_inode) 2620 continue; 2621 2622 if (key.type == BTRFS_DIR_INDEX_KEY && 2623 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2624 ret = replay_one_dir_item(wc->trans, root, path, 2625 eb, i, &key); 2626 if (ret) 2627 break; 2628 } 2629 2630 if (wc->stage < LOG_WALK_REPLAY_ALL) 2631 continue; 2632 2633 /* these keys are simply copied */ 2634 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2635 ret = overwrite_item(wc->trans, root, path, 2636 eb, i, &key); 2637 if (ret) 2638 break; 2639 } else if (key.type == BTRFS_INODE_REF_KEY || 2640 key.type == BTRFS_INODE_EXTREF_KEY) { 2641 ret = add_inode_ref(wc->trans, root, log, path, 2642 eb, i, &key); 2643 if (ret && ret != -ENOENT) 2644 break; 2645 ret = 0; 2646 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2647 ret = replay_one_extent(wc->trans, root, path, 2648 eb, i, &key); 2649 if (ret) 2650 break; 2651 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2652 ret = replay_one_dir_item(wc->trans, root, path, 2653 eb, i, &key); 2654 if (ret) 2655 break; 2656 } 2657 } 2658 btrfs_free_path(path); 2659 return ret; 2660 } 2661 2662 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2663 struct btrfs_root *root, 2664 struct btrfs_path *path, int *level, 2665 struct walk_control *wc) 2666 { 2667 struct btrfs_fs_info *fs_info = root->fs_info; 2668 u64 root_owner; 2669 u64 bytenr; 2670 u64 ptr_gen; 2671 struct extent_buffer *next; 2672 struct extent_buffer *cur; 2673 struct extent_buffer *parent; 2674 u32 blocksize; 2675 int ret = 0; 2676 2677 WARN_ON(*level < 0); 2678 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2679 2680 while (*level > 0) { 2681 struct btrfs_key first_key; 2682 2683 WARN_ON(*level < 0); 2684 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2685 cur = path->nodes[*level]; 2686 2687 WARN_ON(btrfs_header_level(cur) != *level); 2688 2689 if (path->slots[*level] >= 2690 btrfs_header_nritems(cur)) 2691 break; 2692 2693 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2694 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2695 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]); 2696 blocksize = fs_info->nodesize; 2697 2698 parent = path->nodes[*level]; 2699 root_owner = btrfs_header_owner(parent); 2700 2701 next = btrfs_find_create_tree_block(fs_info, bytenr); 2702 if (IS_ERR(next)) 2703 return PTR_ERR(next); 2704 2705 if (*level == 1) { 2706 ret = wc->process_func(root, next, wc, ptr_gen, 2707 *level - 1); 2708 if (ret) { 2709 free_extent_buffer(next); 2710 return ret; 2711 } 2712 2713 path->slots[*level]++; 2714 if (wc->free) { 2715 ret = btrfs_read_buffer(next, ptr_gen, 2716 *level - 1, &first_key); 2717 if (ret) { 2718 free_extent_buffer(next); 2719 return ret; 2720 } 2721 2722 if (trans) { 2723 btrfs_tree_lock(next); 2724 btrfs_set_lock_blocking_write(next); 2725 btrfs_clean_tree_block(next); 2726 btrfs_wait_tree_block_writeback(next); 2727 btrfs_tree_unlock(next); 2728 } else { 2729 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2730 clear_extent_buffer_dirty(next); 2731 } 2732 2733 WARN_ON(root_owner != 2734 BTRFS_TREE_LOG_OBJECTID); 2735 ret = btrfs_free_and_pin_reserved_extent( 2736 fs_info, bytenr, 2737 blocksize); 2738 if (ret) { 2739 free_extent_buffer(next); 2740 return ret; 2741 } 2742 } 2743 free_extent_buffer(next); 2744 continue; 2745 } 2746 ret = 
btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key); 2747 if (ret) { 2748 free_extent_buffer(next); 2749 return ret; 2750 } 2751 2752 WARN_ON(*level <= 0); 2753 if (path->nodes[*level-1]) 2754 free_extent_buffer(path->nodes[*level-1]); 2755 path->nodes[*level-1] = next; 2756 *level = btrfs_header_level(next); 2757 path->slots[*level] = 0; 2758 cond_resched(); 2759 } 2760 WARN_ON(*level < 0); 2761 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2762 2763 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2764 2765 cond_resched(); 2766 return 0; 2767 } 2768 2769 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2770 struct btrfs_root *root, 2771 struct btrfs_path *path, int *level, 2772 struct walk_control *wc) 2773 { 2774 struct btrfs_fs_info *fs_info = root->fs_info; 2775 u64 root_owner; 2776 int i; 2777 int slot; 2778 int ret; 2779 2780 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2781 slot = path->slots[i]; 2782 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2783 path->slots[i]++; 2784 *level = i; 2785 WARN_ON(*level == 0); 2786 return 0; 2787 } else { 2788 struct extent_buffer *parent; 2789 if (path->nodes[*level] == root->node) 2790 parent = path->nodes[*level]; 2791 else 2792 parent = path->nodes[*level + 1]; 2793 2794 root_owner = btrfs_header_owner(parent); 2795 ret = wc->process_func(root, path->nodes[*level], wc, 2796 btrfs_header_generation(path->nodes[*level]), 2797 *level); 2798 if (ret) 2799 return ret; 2800 2801 if (wc->free) { 2802 struct extent_buffer *next; 2803 2804 next = path->nodes[*level]; 2805 2806 if (trans) { 2807 btrfs_tree_lock(next); 2808 btrfs_set_lock_blocking_write(next); 2809 btrfs_clean_tree_block(next); 2810 btrfs_wait_tree_block_writeback(next); 2811 btrfs_tree_unlock(next); 2812 } else { 2813 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2814 clear_extent_buffer_dirty(next); 2815 } 2816 2817 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2818 ret = btrfs_free_and_pin_reserved_extent( 2819 fs_info, 2820 path->nodes[*level]->start, 2821 path->nodes[*level]->len); 2822 if (ret) 2823 return ret; 2824 } 2825 free_extent_buffer(path->nodes[*level]); 2826 path->nodes[*level] = NULL; 2827 *level = i + 1; 2828 } 2829 } 2830 return 1; 2831 } 2832 2833 /* 2834 * drop the reference count on the tree rooted at 'snap'. This traverses 2835 * the tree freeing any blocks that have a ref count of zero after being 2836 * decremented. 2837 */ 2838 static int walk_log_tree(struct btrfs_trans_handle *trans, 2839 struct btrfs_root *log, struct walk_control *wc) 2840 { 2841 struct btrfs_fs_info *fs_info = log->fs_info; 2842 int ret = 0; 2843 int wret; 2844 int level; 2845 struct btrfs_path *path; 2846 int orig_level; 2847 2848 path = btrfs_alloc_path(); 2849 if (!path) 2850 return -ENOMEM; 2851 2852 level = btrfs_header_level(log->node); 2853 orig_level = level; 2854 path->nodes[level] = log->node; 2855 atomic_inc(&log->node->refs); 2856 path->slots[level] = 0; 2857 2858 while (1) { 2859 wret = walk_down_log_tree(trans, log, path, &level, wc); 2860 if (wret > 0) 2861 break; 2862 if (wret < 0) { 2863 ret = wret; 2864 goto out; 2865 } 2866 2867 wret = walk_up_log_tree(trans, log, path, &level, wc); 2868 if (wret > 0) 2869 break; 2870 if (wret < 0) { 2871 ret = wret; 2872 goto out; 2873 } 2874 } 2875 2876 /* was the root node processed? 
if not, catch it here */ 2877 if (path->nodes[orig_level]) { 2878 ret = wc->process_func(log, path->nodes[orig_level], wc, 2879 btrfs_header_generation(path->nodes[orig_level]), 2880 orig_level); 2881 if (ret) 2882 goto out; 2883 if (wc->free) { 2884 struct extent_buffer *next; 2885 2886 next = path->nodes[orig_level]; 2887 2888 if (trans) { 2889 btrfs_tree_lock(next); 2890 btrfs_set_lock_blocking_write(next); 2891 btrfs_clean_tree_block(next); 2892 btrfs_wait_tree_block_writeback(next); 2893 btrfs_tree_unlock(next); 2894 } else { 2895 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2896 clear_extent_buffer_dirty(next); 2897 } 2898 2899 WARN_ON(log->root_key.objectid != 2900 BTRFS_TREE_LOG_OBJECTID); 2901 ret = btrfs_free_and_pin_reserved_extent(fs_info, 2902 next->start, next->len); 2903 if (ret) 2904 goto out; 2905 } 2906 } 2907 2908 out: 2909 btrfs_free_path(path); 2910 return ret; 2911 } 2912 2913 /* 2914 * helper function to update the item for a given subvolumes log root 2915 * in the tree of log roots 2916 */ 2917 static int update_log_root(struct btrfs_trans_handle *trans, 2918 struct btrfs_root *log, 2919 struct btrfs_root_item *root_item) 2920 { 2921 struct btrfs_fs_info *fs_info = log->fs_info; 2922 int ret; 2923 2924 if (log->log_transid == 1) { 2925 /* insert root item on the first sync */ 2926 ret = btrfs_insert_root(trans, fs_info->log_root_tree, 2927 &log->root_key, root_item); 2928 } else { 2929 ret = btrfs_update_root(trans, fs_info->log_root_tree, 2930 &log->root_key, root_item); 2931 } 2932 return ret; 2933 } 2934 2935 static void wait_log_commit(struct btrfs_root *root, int transid) 2936 { 2937 DEFINE_WAIT(wait); 2938 int index = transid % 2; 2939 2940 /* 2941 * we only allow two pending log transactions at a time, 2942 * so we know that if ours is more than 2 older than the 2943 * current transaction, we're done 2944 */ 2945 for (;;) { 2946 prepare_to_wait(&root->log_commit_wait[index], 2947 &wait, TASK_UNINTERRUPTIBLE); 2948 2949 if (!(root->log_transid_committed < transid && 2950 atomic_read(&root->log_commit[index]))) 2951 break; 2952 2953 mutex_unlock(&root->log_mutex); 2954 schedule(); 2955 mutex_lock(&root->log_mutex); 2956 } 2957 finish_wait(&root->log_commit_wait[index], &wait); 2958 } 2959 2960 static void wait_for_writer(struct btrfs_root *root) 2961 { 2962 DEFINE_WAIT(wait); 2963 2964 for (;;) { 2965 prepare_to_wait(&root->log_writer_wait, &wait, 2966 TASK_UNINTERRUPTIBLE); 2967 if (!atomic_read(&root->log_writers)) 2968 break; 2969 2970 mutex_unlock(&root->log_mutex); 2971 schedule(); 2972 mutex_lock(&root->log_mutex); 2973 } 2974 finish_wait(&root->log_writer_wait, &wait); 2975 } 2976 2977 static inline void btrfs_remove_log_ctx(struct btrfs_root *root, 2978 struct btrfs_log_ctx *ctx) 2979 { 2980 if (!ctx) 2981 return; 2982 2983 mutex_lock(&root->log_mutex); 2984 list_del_init(&ctx->list); 2985 mutex_unlock(&root->log_mutex); 2986 } 2987 2988 /* 2989 * Invoked in log mutex context, or be sure there is no other task which 2990 * can access the list. 
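 *
 * (btrfs_sync_log() uses this for both the subvolume root and the log root
 * tree once a log commit finishes, so every queued btrfs_log_ctx gets the
 * final result in its log_ret.)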
2991 */ 2992 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, 2993 int index, int error) 2994 { 2995 struct btrfs_log_ctx *ctx; 2996 struct btrfs_log_ctx *safe; 2997 2998 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { 2999 list_del_init(&ctx->list); 3000 ctx->log_ret = error; 3001 } 3002 3003 INIT_LIST_HEAD(&root->log_ctxs[index]); 3004 } 3005 3006 /* 3007 * btrfs_sync_log does sends a given tree log down to the disk and 3008 * updates the super blocks to record it. When this call is done, 3009 * you know that any inodes previously logged are safely on disk only 3010 * if it returns 0. 3011 * 3012 * Any other return value means you need to call btrfs_commit_transaction. 3013 * Some of the edge cases for fsyncing directories that have had unlinks 3014 * or renames done in the past mean that sometimes the only safe 3015 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 3016 * that has happened. 3017 */ 3018 int btrfs_sync_log(struct btrfs_trans_handle *trans, 3019 struct btrfs_root *root, struct btrfs_log_ctx *ctx) 3020 { 3021 int index1; 3022 int index2; 3023 int mark; 3024 int ret; 3025 struct btrfs_fs_info *fs_info = root->fs_info; 3026 struct btrfs_root *log = root->log_root; 3027 struct btrfs_root *log_root_tree = fs_info->log_root_tree; 3028 struct btrfs_root_item new_root_item; 3029 int log_transid = 0; 3030 struct btrfs_log_ctx root_log_ctx; 3031 struct blk_plug plug; 3032 3033 mutex_lock(&root->log_mutex); 3034 log_transid = ctx->log_transid; 3035 if (root->log_transid_committed >= log_transid) { 3036 mutex_unlock(&root->log_mutex); 3037 return ctx->log_ret; 3038 } 3039 3040 index1 = log_transid % 2; 3041 if (atomic_read(&root->log_commit[index1])) { 3042 wait_log_commit(root, log_transid); 3043 mutex_unlock(&root->log_mutex); 3044 return ctx->log_ret; 3045 } 3046 ASSERT(log_transid == root->log_transid); 3047 atomic_set(&root->log_commit[index1], 1); 3048 3049 /* wait for previous tree log sync to complete */ 3050 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 3051 wait_log_commit(root, log_transid - 1); 3052 3053 while (1) { 3054 int batch = atomic_read(&root->log_batch); 3055 /* when we're on an ssd, just kick the log commit out */ 3056 if (!btrfs_test_opt(fs_info, SSD) && 3057 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { 3058 mutex_unlock(&root->log_mutex); 3059 schedule_timeout_uninterruptible(1); 3060 mutex_lock(&root->log_mutex); 3061 } 3062 wait_for_writer(root); 3063 if (batch == atomic_read(&root->log_batch)) 3064 break; 3065 } 3066 3067 /* bail out if we need to do a full commit */ 3068 if (btrfs_need_log_full_commit(trans)) { 3069 ret = -EAGAIN; 3070 mutex_unlock(&root->log_mutex); 3071 goto out; 3072 } 3073 3074 if (log_transid % 2 == 0) 3075 mark = EXTENT_DIRTY; 3076 else 3077 mark = EXTENT_NEW; 3078 3079 /* we start IO on all the marked extents here, but we don't actually 3080 * wait for them until later. 3081 */ 3082 blk_start_plug(&plug); 3083 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); 3084 if (ret) { 3085 blk_finish_plug(&plug); 3086 btrfs_abort_transaction(trans, ret); 3087 btrfs_set_log_full_commit(trans); 3088 mutex_unlock(&root->log_mutex); 3089 goto out; 3090 } 3091 3092 /* 3093 * We _must_ update under the root->log_mutex in order to make sure we 3094 * have a consistent view of the log root we are trying to commit at 3095 * this moment. 
3096 * 3097 * We _must_ copy this into a local copy, because we are not holding the 3098 * log_root_tree->log_mutex yet. This is important because when we 3099 * commit the log_root_tree we must have a consistent view of the 3100 * log_root_tree when we update the super block to point at the 3101 * log_root_tree bytenr. If we update the log_root_tree here we'll race 3102 * with the commit and possibly point at the new block which we may not 3103 * have written out. 3104 */ 3105 btrfs_set_root_node(&log->root_item, log->node); 3106 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); 3107 3108 root->log_transid++; 3109 log->log_transid = root->log_transid; 3110 root->log_start_pid = 0; 3111 /* 3112 * IO has been started, blocks of the log tree have WRITTEN flag set 3113 * in their headers. new modifications of the log will be written to 3114 * new positions. so it's safe to allow log writers to go in. 3115 */ 3116 mutex_unlock(&root->log_mutex); 3117 3118 btrfs_init_log_ctx(&root_log_ctx, NULL); 3119 3120 mutex_lock(&log_root_tree->log_mutex); 3121 atomic_inc(&log_root_tree->log_batch); 3122 atomic_inc(&log_root_tree->log_writers); 3123 3124 index2 = log_root_tree->log_transid % 2; 3125 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3126 root_log_ctx.log_transid = log_root_tree->log_transid; 3127 3128 mutex_unlock(&log_root_tree->log_mutex); 3129 3130 mutex_lock(&log_root_tree->log_mutex); 3131 3132 /* 3133 * Now we are safe to update the log_root_tree because we're under the 3134 * log_mutex, and we're a current writer so we're holding the commit 3135 * open until we drop the log_mutex. 3136 */ 3137 ret = update_log_root(trans, log, &new_root_item); 3138 3139 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 3140 /* atomic_dec_and_test implies a barrier */ 3141 cond_wake_up_nomb(&log_root_tree->log_writer_wait); 3142 } 3143 3144 if (ret) { 3145 if (!list_empty(&root_log_ctx.list)) 3146 list_del_init(&root_log_ctx.list); 3147 3148 blk_finish_plug(&plug); 3149 btrfs_set_log_full_commit(trans); 3150 3151 if (ret != -ENOSPC) { 3152 btrfs_abort_transaction(trans, ret); 3153 mutex_unlock(&log_root_tree->log_mutex); 3154 goto out; 3155 } 3156 btrfs_wait_tree_log_extents(log, mark); 3157 mutex_unlock(&log_root_tree->log_mutex); 3158 ret = -EAGAIN; 3159 goto out; 3160 } 3161 3162 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 3163 blk_finish_plug(&plug); 3164 list_del_init(&root_log_ctx.list); 3165 mutex_unlock(&log_root_tree->log_mutex); 3166 ret = root_log_ctx.log_ret; 3167 goto out; 3168 } 3169 3170 index2 = root_log_ctx.log_transid % 2; 3171 if (atomic_read(&log_root_tree->log_commit[index2])) { 3172 blk_finish_plug(&plug); 3173 ret = btrfs_wait_tree_log_extents(log, mark); 3174 wait_log_commit(log_root_tree, 3175 root_log_ctx.log_transid); 3176 mutex_unlock(&log_root_tree->log_mutex); 3177 if (!ret) 3178 ret = root_log_ctx.log_ret; 3179 goto out; 3180 } 3181 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 3182 atomic_set(&log_root_tree->log_commit[index2], 1); 3183 3184 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 3185 wait_log_commit(log_root_tree, 3186 root_log_ctx.log_transid - 1); 3187 } 3188 3189 wait_for_writer(log_root_tree); 3190 3191 /* 3192 * now that we've moved on to the tree of log tree roots, 3193 * check the full commit flag again 3194 */ 3195 if (btrfs_need_log_full_commit(trans)) { 3196 blk_finish_plug(&plug); 3197 btrfs_wait_tree_log_extents(log, mark); 3198 
mutex_unlock(&log_root_tree->log_mutex); 3199 ret = -EAGAIN; 3200 goto out_wake_log_root; 3201 } 3202 3203 ret = btrfs_write_marked_extents(fs_info, 3204 &log_root_tree->dirty_log_pages, 3205 EXTENT_DIRTY | EXTENT_NEW); 3206 blk_finish_plug(&plug); 3207 if (ret) { 3208 btrfs_set_log_full_commit(trans); 3209 btrfs_abort_transaction(trans, ret); 3210 mutex_unlock(&log_root_tree->log_mutex); 3211 goto out_wake_log_root; 3212 } 3213 ret = btrfs_wait_tree_log_extents(log, mark); 3214 if (!ret) 3215 ret = btrfs_wait_tree_log_extents(log_root_tree, 3216 EXTENT_NEW | EXTENT_DIRTY); 3217 if (ret) { 3218 btrfs_set_log_full_commit(trans); 3219 mutex_unlock(&log_root_tree->log_mutex); 3220 goto out_wake_log_root; 3221 } 3222 3223 btrfs_set_super_log_root(fs_info->super_for_commit, 3224 log_root_tree->node->start); 3225 btrfs_set_super_log_root_level(fs_info->super_for_commit, 3226 btrfs_header_level(log_root_tree->node)); 3227 3228 log_root_tree->log_transid++; 3229 mutex_unlock(&log_root_tree->log_mutex); 3230 3231 /* 3232 * Nobody else is going to jump in and write the ctree 3233 * super here because the log_commit atomic below is protecting 3234 * us. We must be called with a transaction handle pinning 3235 * the running transaction open, so a full commit can't hop 3236 * in and cause problems either. 3237 */ 3238 ret = write_all_supers(fs_info, 1); 3239 if (ret) { 3240 btrfs_set_log_full_commit(trans); 3241 btrfs_abort_transaction(trans, ret); 3242 goto out_wake_log_root; 3243 } 3244 3245 mutex_lock(&root->log_mutex); 3246 if (root->last_log_commit < log_transid) 3247 root->last_log_commit = log_transid; 3248 mutex_unlock(&root->log_mutex); 3249 3250 out_wake_log_root: 3251 mutex_lock(&log_root_tree->log_mutex); 3252 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 3253 3254 log_root_tree->log_transid_committed++; 3255 atomic_set(&log_root_tree->log_commit[index2], 0); 3256 mutex_unlock(&log_root_tree->log_mutex); 3257 3258 /* 3259 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3260 * all the updates above are seen by the woken threads. It might not be 3261 * necessary, but proving that seems to be hard. 3262 */ 3263 cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3264 out: 3265 mutex_lock(&root->log_mutex); 3266 btrfs_remove_all_log_ctxs(root, index1, ret); 3267 root->log_transid_committed++; 3268 atomic_set(&root->log_commit[index1], 0); 3269 mutex_unlock(&root->log_mutex); 3270 3271 /* 3272 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3273 * all the updates above are seen by the woken threads. It might not be 3274 * necessary, but proving that seems to be hard. 3275 */ 3276 cond_wake_up(&root->log_commit_wait[index1]); 3277 return ret; 3278 } 3279 3280 static void free_log_tree(struct btrfs_trans_handle *trans, 3281 struct btrfs_root *log) 3282 { 3283 int ret; 3284 struct walk_control wc = { 3285 .free = 1, 3286 .process_func = process_one_buffer 3287 }; 3288 3289 ret = walk_log_tree(trans, log, &wc); 3290 if (ret) { 3291 if (trans) 3292 btrfs_abort_transaction(trans, ret); 3293 else 3294 btrfs_handle_fs_error(log->fs_info, ret, NULL); 3295 } 3296 3297 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, 3298 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); 3299 free_extent_buffer(log->node); 3300 kfree(log); 3301 } 3302 3303 /* 3304 * free all the extents used by the tree log. 
This should be called 3305 * at commit time of the full transaction 3306 */ 3307 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 3308 { 3309 if (root->log_root) { 3310 free_log_tree(trans, root->log_root); 3311 root->log_root = NULL; 3312 } 3313 return 0; 3314 } 3315 3316 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 3317 struct btrfs_fs_info *fs_info) 3318 { 3319 if (fs_info->log_root_tree) { 3320 free_log_tree(trans, fs_info->log_root_tree); 3321 fs_info->log_root_tree = NULL; 3322 } 3323 return 0; 3324 } 3325 3326 /* 3327 * Check if an inode was logged in the current transaction. We can't always rely 3328 * on an inode's logged_trans value, because it's an in-memory only field and 3329 * therefore not persisted. This means that its value is lost if the inode gets 3330 * evicted and loaded again from disk (in which case it has a value of 0, and 3331 * certainly it is smaller than any possible transaction ID), when that happens 3332 * the full_sync flag is set in the inode's runtime flags, so in that case we 3333 * assume eviction happened and ignore the logged_trans value, assuming the 3334 * worst case, that the inode was logged before in the current transaction. 3335 */ 3336 static bool inode_logged(struct btrfs_trans_handle *trans, 3337 struct btrfs_inode *inode) 3338 { 3339 if (inode->logged_trans == trans->transid) 3340 return true; 3341 3342 if (inode->last_trans == trans->transid && 3343 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) && 3344 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags)) 3345 return true; 3346 3347 return false; 3348 } 3349 3350 /* 3351 * If both a file and directory are logged, and unlinks or renames are 3352 * mixed in, we have a few interesting corners: 3353 * 3354 * create file X in dir Y 3355 * link file X to X.link in dir Y 3356 * fsync file X 3357 * unlink file X but leave X.link 3358 * fsync dir Y 3359 * 3360 * After a crash we would expect only X.link to exist. But file X 3361 * didn't get fsync'd again so the log has back refs for X and X.link. 3362 * 3363 * We solve this by removing directory entries and inode backrefs from the 3364 * log when a file that was logged in the current transaction is 3365 * unlinked. Any later fsync will include the updated log entries, and 3366 * we'll be able to reconstruct the proper directory items from backrefs. 3367 * 3368 * This optimization allows us to avoid relogging the entire inode 3369 * or the entire directory.
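 *
 * As a rough sketch (matching the prototypes below, not the exact unlink
 * code path), a caller removing the name 'name' at directory index 'index'
 * would do something like:
 *
 *	btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index);
 *	btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, btrfs_ino(dir));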
3370 */ 3371 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 3372 struct btrfs_root *root, 3373 const char *name, int name_len, 3374 struct btrfs_inode *dir, u64 index) 3375 { 3376 struct btrfs_root *log; 3377 struct btrfs_dir_item *di; 3378 struct btrfs_path *path; 3379 int ret; 3380 int err = 0; 3381 int bytes_del = 0; 3382 u64 dir_ino = btrfs_ino(dir); 3383 3384 if (!inode_logged(trans, dir)) 3385 return 0; 3386 3387 ret = join_running_log_trans(root); 3388 if (ret) 3389 return 0; 3390 3391 mutex_lock(&dir->log_mutex); 3392 3393 log = root->log_root; 3394 path = btrfs_alloc_path(); 3395 if (!path) { 3396 err = -ENOMEM; 3397 goto out_unlock; 3398 } 3399 3400 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 3401 name, name_len, -1); 3402 if (IS_ERR(di)) { 3403 err = PTR_ERR(di); 3404 goto fail; 3405 } 3406 if (di) { 3407 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3408 bytes_del += name_len; 3409 if (ret) { 3410 err = ret; 3411 goto fail; 3412 } 3413 } 3414 btrfs_release_path(path); 3415 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 3416 index, name, name_len, -1); 3417 if (IS_ERR(di)) { 3418 err = PTR_ERR(di); 3419 goto fail; 3420 } 3421 if (di) { 3422 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3423 bytes_del += name_len; 3424 if (ret) { 3425 err = ret; 3426 goto fail; 3427 } 3428 } 3429 3430 /* update the directory size in the log to reflect the names 3431 * we have removed 3432 */ 3433 if (bytes_del) { 3434 struct btrfs_key key; 3435 3436 key.objectid = dir_ino; 3437 key.offset = 0; 3438 key.type = BTRFS_INODE_ITEM_KEY; 3439 btrfs_release_path(path); 3440 3441 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 3442 if (ret < 0) { 3443 err = ret; 3444 goto fail; 3445 } 3446 if (ret == 0) { 3447 struct btrfs_inode_item *item; 3448 u64 i_size; 3449 3450 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3451 struct btrfs_inode_item); 3452 i_size = btrfs_inode_size(path->nodes[0], item); 3453 if (i_size > bytes_del) 3454 i_size -= bytes_del; 3455 else 3456 i_size = 0; 3457 btrfs_set_inode_size(path->nodes[0], item, i_size); 3458 btrfs_mark_buffer_dirty(path->nodes[0]); 3459 } else 3460 ret = 0; 3461 btrfs_release_path(path); 3462 } 3463 fail: 3464 btrfs_free_path(path); 3465 out_unlock: 3466 mutex_unlock(&dir->log_mutex); 3467 if (ret == -ENOSPC) { 3468 btrfs_set_log_full_commit(trans); 3469 ret = 0; 3470 } else if (ret < 0) 3471 btrfs_abort_transaction(trans, ret); 3472 3473 btrfs_end_log_trans(root); 3474 3475 return err; 3476 } 3477 3478 /* see comments for btrfs_del_dir_entries_in_log */ 3479 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3480 struct btrfs_root *root, 3481 const char *name, int name_len, 3482 struct btrfs_inode *inode, u64 dirid) 3483 { 3484 struct btrfs_root *log; 3485 u64 index; 3486 int ret; 3487 3488 if (!inode_logged(trans, inode)) 3489 return 0; 3490 3491 ret = join_running_log_trans(root); 3492 if (ret) 3493 return 0; 3494 log = root->log_root; 3495 mutex_lock(&inode->log_mutex); 3496 3497 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 3498 dirid, &index); 3499 mutex_unlock(&inode->log_mutex); 3500 if (ret == -ENOSPC) { 3501 btrfs_set_log_full_commit(trans); 3502 ret = 0; 3503 } else if (ret < 0 && ret != -ENOENT) 3504 btrfs_abort_transaction(trans, ret); 3505 btrfs_end_log_trans(root); 3506 3507 return ret; 3508 } 3509 3510 /* 3511 * creates a range item in the log for 'dirid'. 
first_offset and 3512 * last_offset tell us which parts of the key space the log should 3513 * be considered authoritative for. 3514 */ 3515 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3516 struct btrfs_root *log, 3517 struct btrfs_path *path, 3518 int key_type, u64 dirid, 3519 u64 first_offset, u64 last_offset) 3520 { 3521 int ret; 3522 struct btrfs_key key; 3523 struct btrfs_dir_log_item *item; 3524 3525 key.objectid = dirid; 3526 key.offset = first_offset; 3527 if (key_type == BTRFS_DIR_ITEM_KEY) 3528 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3529 else 3530 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3531 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3532 if (ret) 3533 return ret; 3534 3535 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3536 struct btrfs_dir_log_item); 3537 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3538 btrfs_mark_buffer_dirty(path->nodes[0]); 3539 btrfs_release_path(path); 3540 return 0; 3541 } 3542 3543 /* 3544 * log all the items included in the current transaction for a given 3545 * directory. This also creates the range items in the log tree required 3546 * to replay anything deleted before the fsync 3547 */ 3548 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3549 struct btrfs_root *root, struct btrfs_inode *inode, 3550 struct btrfs_path *path, 3551 struct btrfs_path *dst_path, int key_type, 3552 struct btrfs_log_ctx *ctx, 3553 u64 min_offset, u64 *last_offset_ret) 3554 { 3555 struct btrfs_key min_key; 3556 struct btrfs_root *log = root->log_root; 3557 struct extent_buffer *src; 3558 int err = 0; 3559 int ret; 3560 int i; 3561 int nritems; 3562 u64 first_offset = min_offset; 3563 u64 last_offset = (u64)-1; 3564 u64 ino = btrfs_ino(inode); 3565 3566 log = root->log_root; 3567 3568 min_key.objectid = ino; 3569 min_key.type = key_type; 3570 min_key.offset = min_offset; 3571 3572 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3573 3574 /* 3575 * we didn't find anything from this transaction, see if there 3576 * is anything at all 3577 */ 3578 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3579 min_key.objectid = ino; 3580 min_key.type = key_type; 3581 min_key.offset = (u64)-1; 3582 btrfs_release_path(path); 3583 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3584 if (ret < 0) { 3585 btrfs_release_path(path); 3586 return ret; 3587 } 3588 ret = btrfs_previous_item(root, path, ino, key_type); 3589 3590 /* if ret == 0 there are items for this type, 3591 * create a range to tell us the last key of this type. 3592 * otherwise, there are no items in this directory after 3593 * *min_offset, and we create a range to indicate that. 3594 */ 3595 if (ret == 0) { 3596 struct btrfs_key tmp; 3597 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3598 path->slots[0]); 3599 if (key_type == tmp.type) 3600 first_offset = max(min_offset, tmp.offset) + 1; 3601 } 3602 goto done; 3603 } 3604 3605 /* go backward to find any previous key */ 3606 ret = btrfs_previous_item(root, path, ino, key_type); 3607 if (ret == 0) { 3608 struct btrfs_key tmp; 3609 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3610 if (key_type == tmp.type) { 3611 first_offset = tmp.offset; 3612 ret = overwrite_item(trans, log, dst_path, 3613 path->nodes[0], path->slots[0], 3614 &tmp); 3615 if (ret) { 3616 err = ret; 3617 goto done; 3618 } 3619 } 3620 } 3621 btrfs_release_path(path); 3622 3623 /* 3624 * Find the first key from this transaction again. 
See the note for 3625 * log_new_dir_dentries, if we're logging a directory recursively we 3626 * won't be holding its i_mutex, which means we can modify the directory 3627 * while we're logging it. If we remove an entry between our first 3628 * search and this search we'll not find the key again and can just 3629 * bail. 3630 */ 3631 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3632 if (ret != 0) 3633 goto done; 3634 3635 /* 3636 * we have a block from this transaction, log every item in it 3637 * from our directory 3638 */ 3639 while (1) { 3640 struct btrfs_key tmp; 3641 src = path->nodes[0]; 3642 nritems = btrfs_header_nritems(src); 3643 for (i = path->slots[0]; i < nritems; i++) { 3644 struct btrfs_dir_item *di; 3645 3646 btrfs_item_key_to_cpu(src, &min_key, i); 3647 3648 if (min_key.objectid != ino || min_key.type != key_type) 3649 goto done; 3650 ret = overwrite_item(trans, log, dst_path, src, i, 3651 &min_key); 3652 if (ret) { 3653 err = ret; 3654 goto done; 3655 } 3656 3657 /* 3658 * We must make sure that when we log a directory entry, 3659 * the corresponding inode, after log replay, has a 3660 * matching link count. For example: 3661 * 3662 * touch foo 3663 * mkdir mydir 3664 * sync 3665 * ln foo mydir/bar 3666 * xfs_io -c "fsync" mydir 3667 * <crash> 3668 * <mount fs and log replay> 3669 * 3670 * Would result in a fsync log that when replayed, our 3671 * file inode would have a link count of 1, but we get 3672 * two directory entries pointing to the same inode. 3673 * After removing one of the names, it would not be 3674 * possible to remove the other name, which resulted 3675 * always in stale file handle errors, and would not 3676 * be possible to rmdir the parent directory, since 3677 * its i_size could never decrement to the value 3678 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors. 3679 */ 3680 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3681 btrfs_dir_item_key_to_cpu(src, di, &tmp); 3682 if (ctx && 3683 (btrfs_dir_transid(src, di) == trans->transid || 3684 btrfs_dir_type(src, di) == BTRFS_FT_DIR) && 3685 tmp.type != BTRFS_ROOT_ITEM_KEY) 3686 ctx->log_new_dentries = true; 3687 } 3688 path->slots[0] = nritems; 3689 3690 /* 3691 * look ahead to the next item and see if it is also 3692 * from this directory and from this transaction 3693 */ 3694 ret = btrfs_next_leaf(root, path); 3695 if (ret) { 3696 if (ret == 1) 3697 last_offset = (u64)-1; 3698 else 3699 err = ret; 3700 goto done; 3701 } 3702 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3703 if (tmp.objectid != ino || tmp.type != key_type) { 3704 last_offset = (u64)-1; 3705 goto done; 3706 } 3707 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3708 ret = overwrite_item(trans, log, dst_path, 3709 path->nodes[0], path->slots[0], 3710 &tmp); 3711 if (ret) 3712 err = ret; 3713 else 3714 last_offset = tmp.offset; 3715 goto done; 3716 } 3717 } 3718 done: 3719 btrfs_release_path(path); 3720 btrfs_release_path(dst_path); 3721 3722 if (err == 0) { 3723 *last_offset_ret = last_offset; 3724 /* 3725 * insert the log range keys to indicate where the log 3726 * is valid 3727 */ 3728 ret = insert_dir_log_key(trans, log, path, key_type, 3729 ino, first_offset, last_offset); 3730 if (ret) 3731 err = ret; 3732 } 3733 return err; 3734 } 3735 3736 /* 3737 * logging directories is very similar to logging inodes, We find all the items 3738 * from the current transaction and write them to the log. 
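 * Both the BTRFS_DIR_ITEM_KEY and the BTRFS_DIR_INDEX_KEY key space are
 * walked, chunk by chunk, by calling log_dir_items() above.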
3739 * 3740 * The recovery code scans the directory in the subvolume, and if it finds a 3741 * key in the range logged that is not present in the log tree, then it means 3742 * that dir entry was unlinked during the transaction. 3743 * 3744 * In order for that scan to work, we must include one key smaller than 3745 * the smallest logged by this transaction and one key larger than the largest 3746 * key logged by this transaction. 3747 */ 3748 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3749 struct btrfs_root *root, struct btrfs_inode *inode, 3750 struct btrfs_path *path, 3751 struct btrfs_path *dst_path, 3752 struct btrfs_log_ctx *ctx) 3753 { 3754 u64 min_key; 3755 u64 max_key; 3756 int ret; 3757 int key_type = BTRFS_DIR_ITEM_KEY; 3758 3759 again: 3760 min_key = 0; 3761 max_key = 0; 3762 while (1) { 3763 ret = log_dir_items(trans, root, inode, path, dst_path, key_type, 3764 ctx, min_key, &max_key); 3765 if (ret) 3766 return ret; 3767 if (max_key == (u64)-1) 3768 break; 3769 min_key = max_key + 1; 3770 } 3771 3772 if (key_type == BTRFS_DIR_ITEM_KEY) { 3773 key_type = BTRFS_DIR_INDEX_KEY; 3774 goto again; 3775 } 3776 return 0; 3777 } 3778 3779 /* 3780 * a helper function to drop items from the log before we relog an 3781 * inode. max_key_type indicates the highest item type to remove. 3782 * This cannot be run for file data extents because it does not 3783 * free the extents they point to. 3784 */ 3785 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3786 struct btrfs_root *log, 3787 struct btrfs_path *path, 3788 u64 objectid, int max_key_type) 3789 { 3790 int ret; 3791 struct btrfs_key key; 3792 struct btrfs_key found_key; 3793 int start_slot; 3794 3795 key.objectid = objectid; 3796 key.type = max_key_type; 3797 key.offset = (u64)-1; 3798 3799 while (1) { 3800 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3801 BUG_ON(ret == 0); /* Logic error */ 3802 if (ret < 0) 3803 break; 3804 3805 if (path->slots[0] == 0) 3806 break; 3807 3808 path->slots[0]--; 3809 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3810 path->slots[0]); 3811 3812 if (found_key.objectid != objectid) 3813 break; 3814 3815 found_key.offset = 0; 3816 found_key.type = 0; 3817 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3818 &start_slot); 3819 if (ret < 0) 3820 break; 3821 3822 ret = btrfs_del_items(trans, log, path, start_slot, 3823 path->slots[0] - start_slot + 1); 3824 /* 3825 * If start slot isn't 0 then we don't need to re-search, we've 3826 * found the last guy with the objectid in this tree. 
3827 */ 3828 if (ret || start_slot != 0) 3829 break; 3830 btrfs_release_path(path); 3831 } 3832 btrfs_release_path(path); 3833 if (ret > 0) 3834 ret = 0; 3835 return ret; 3836 } 3837 3838 static void fill_inode_item(struct btrfs_trans_handle *trans, 3839 struct extent_buffer *leaf, 3840 struct btrfs_inode_item *item, 3841 struct inode *inode, int log_inode_only, 3842 u64 logged_isize) 3843 { 3844 struct btrfs_map_token token; 3845 3846 btrfs_init_map_token(&token, leaf); 3847 3848 if (log_inode_only) { 3849 /* set the generation to zero so the recover code 3850 * can tell the difference between an logging 3851 * just to say 'this inode exists' and a logging 3852 * to say 'update this inode with these values' 3853 */ 3854 btrfs_set_token_inode_generation(leaf, item, 0, &token); 3855 btrfs_set_token_inode_size(leaf, item, logged_isize, &token); 3856 } else { 3857 btrfs_set_token_inode_generation(leaf, item, 3858 BTRFS_I(inode)->generation, 3859 &token); 3860 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3861 } 3862 3863 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3864 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3865 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3866 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3867 3868 btrfs_set_token_timespec_sec(leaf, &item->atime, 3869 inode->i_atime.tv_sec, &token); 3870 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3871 inode->i_atime.tv_nsec, &token); 3872 3873 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3874 inode->i_mtime.tv_sec, &token); 3875 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3876 inode->i_mtime.tv_nsec, &token); 3877 3878 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3879 inode->i_ctime.tv_sec, &token); 3880 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3881 inode->i_ctime.tv_nsec, &token); 3882 3883 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3884 &token); 3885 3886 btrfs_set_token_inode_sequence(leaf, item, 3887 inode_peek_iversion(inode), &token); 3888 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3889 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3890 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3891 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3892 } 3893 3894 static int log_inode_item(struct btrfs_trans_handle *trans, 3895 struct btrfs_root *log, struct btrfs_path *path, 3896 struct btrfs_inode *inode) 3897 { 3898 struct btrfs_inode_item *inode_item; 3899 int ret; 3900 3901 ret = btrfs_insert_empty_item(trans, log, path, 3902 &inode->location, sizeof(*inode_item)); 3903 if (ret && ret != -EEXIST) 3904 return ret; 3905 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3906 struct btrfs_inode_item); 3907 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, 3908 0, 0); 3909 btrfs_release_path(path); 3910 return 0; 3911 } 3912 3913 static int log_csums(struct btrfs_trans_handle *trans, 3914 struct btrfs_root *log_root, 3915 struct btrfs_ordered_sum *sums) 3916 { 3917 int ret; 3918 3919 /* 3920 * Due to extent cloning, we might have logged a csum item that covers a 3921 * subrange of a cloned extent, and later we can end up logging a csum 3922 * item for a larger subrange of the same extent or the entire range. 
3923 * This would leave csum items in the log tree that cover the same range 3924 * and break the searches for checksums in the log tree, resulting in 3925 * some checksums missing in the fs/subvolume tree. So just delete (or 3926 * trim and adjust) any existing csum items in the log for this range. 3927 */ 3928 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len); 3929 if (ret) 3930 return ret; 3931 3932 return btrfs_csum_file_blocks(trans, log_root, sums); 3933 } 3934 3935 static noinline int copy_items(struct btrfs_trans_handle *trans, 3936 struct btrfs_inode *inode, 3937 struct btrfs_path *dst_path, 3938 struct btrfs_path *src_path, u64 *last_extent, 3939 int start_slot, int nr, int inode_only, 3940 u64 logged_isize) 3941 { 3942 struct btrfs_fs_info *fs_info = trans->fs_info; 3943 unsigned long src_offset; 3944 unsigned long dst_offset; 3945 struct btrfs_root *log = inode->root->log_root; 3946 struct btrfs_file_extent_item *extent; 3947 struct btrfs_inode_item *inode_item; 3948 struct extent_buffer *src = src_path->nodes[0]; 3949 struct btrfs_key first_key, last_key, key; 3950 int ret; 3951 struct btrfs_key *ins_keys; 3952 u32 *ins_sizes; 3953 char *ins_data; 3954 int i; 3955 struct list_head ordered_sums; 3956 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; 3957 bool has_extents = false; 3958 bool need_find_last_extent = true; 3959 bool done = false; 3960 3961 INIT_LIST_HEAD(&ordered_sums); 3962 3963 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3964 nr * sizeof(u32), GFP_NOFS); 3965 if (!ins_data) 3966 return -ENOMEM; 3967 3968 first_key.objectid = (u64)-1; 3969 3970 ins_sizes = (u32 *)ins_data; 3971 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3972 3973 for (i = 0; i < nr; i++) { 3974 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3975 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3976 } 3977 ret = btrfs_insert_empty_items(trans, log, dst_path, 3978 ins_keys, ins_sizes, nr); 3979 if (ret) { 3980 kfree(ins_data); 3981 return ret; 3982 } 3983 3984 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3985 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3986 dst_path->slots[0]); 3987 3988 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3989 3990 if (i == nr - 1) 3991 last_key = ins_keys[i]; 3992 3993 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3994 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3995 dst_path->slots[0], 3996 struct btrfs_inode_item); 3997 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3998 &inode->vfs_inode, 3999 inode_only == LOG_INODE_EXISTS, 4000 logged_isize); 4001 } else { 4002 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 4003 src_offset, ins_sizes[i]); 4004 } 4005 4006 /* 4007 * We set need_find_last_extent here in case we know we were 4008 * processing other items and then walk into the first extent in 4009 * the inode. If we don't hit an extent then nothing changes, 4010 * we'll do the last search the next time around. 
4011 */ 4012 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 4013 has_extents = true; 4014 if (first_key.objectid == (u64)-1) 4015 first_key = ins_keys[i]; 4016 } else { 4017 need_find_last_extent = false; 4018 } 4019 4020 /* take a reference on file data extents so that truncates 4021 * or deletes of this inode don't have to relog the inode 4022 * again 4023 */ 4024 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && 4025 !skip_csum) { 4026 int found_type; 4027 extent = btrfs_item_ptr(src, start_slot + i, 4028 struct btrfs_file_extent_item); 4029 4030 if (btrfs_file_extent_generation(src, extent) < trans->transid) 4031 continue; 4032 4033 found_type = btrfs_file_extent_type(src, extent); 4034 if (found_type == BTRFS_FILE_EXTENT_REG) { 4035 u64 ds, dl, cs, cl; 4036 ds = btrfs_file_extent_disk_bytenr(src, 4037 extent); 4038 /* ds == 0 is a hole */ 4039 if (ds == 0) 4040 continue; 4041 4042 dl = btrfs_file_extent_disk_num_bytes(src, 4043 extent); 4044 cs = btrfs_file_extent_offset(src, extent); 4045 cl = btrfs_file_extent_num_bytes(src, 4046 extent); 4047 if (btrfs_file_extent_compression(src, 4048 extent)) { 4049 cs = 0; 4050 cl = dl; 4051 } 4052 4053 ret = btrfs_lookup_csums_range( 4054 fs_info->csum_root, 4055 ds + cs, ds + cs + cl - 1, 4056 &ordered_sums, 0); 4057 if (ret) { 4058 btrfs_release_path(dst_path); 4059 kfree(ins_data); 4060 return ret; 4061 } 4062 } 4063 } 4064 } 4065 4066 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 4067 btrfs_release_path(dst_path); 4068 kfree(ins_data); 4069 4070 /* 4071 * we have to do this after the loop above to avoid changing the 4072 * log tree while trying to change the log tree. 4073 */ 4074 ret = 0; 4075 while (!list_empty(&ordered_sums)) { 4076 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4077 struct btrfs_ordered_sum, 4078 list); 4079 if (!ret) 4080 ret = log_csums(trans, log, sums); 4081 list_del(&sums->list); 4082 kfree(sums); 4083 } 4084 4085 if (!has_extents) 4086 return ret; 4087 4088 if (need_find_last_extent && *last_extent == first_key.offset) { 4089 /* 4090 * We don't have any leafs between our current one and the one 4091 * we processed before that can have file extent items for our 4092 * inode (and have a generation number smaller than our current 4093 * transaction id). 4094 */ 4095 need_find_last_extent = false; 4096 } 4097 4098 /* 4099 * Because we use btrfs_search_forward we could skip leaves that were 4100 * not modified and then assume *last_extent is valid when it really 4101 * isn't. So back up to the previous leaf and read the end of the last 4102 * extent before we go and fill in holes. 
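* For example, if the leaf preceding the first one we copied from was not modified in the current transaction, btrfs_search_forward() skipped it, so *last_extent may still reflect an older call and must be recomputed from that leaf.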
4103 */ 4104 if (need_find_last_extent) { 4105 u64 len; 4106 4107 ret = btrfs_prev_leaf(inode->root, src_path); 4108 if (ret < 0) 4109 return ret; 4110 if (ret) 4111 goto fill_holes; 4112 if (src_path->slots[0]) 4113 src_path->slots[0]--; 4114 src = src_path->nodes[0]; 4115 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 4116 if (key.objectid != btrfs_ino(inode) || 4117 key.type != BTRFS_EXTENT_DATA_KEY) 4118 goto fill_holes; 4119 extent = btrfs_item_ptr(src, src_path->slots[0], 4120 struct btrfs_file_extent_item); 4121 if (btrfs_file_extent_type(src, extent) == 4122 BTRFS_FILE_EXTENT_INLINE) { 4123 len = btrfs_file_extent_ram_bytes(src, extent); 4124 *last_extent = ALIGN(key.offset + len, 4125 fs_info->sectorsize); 4126 } else { 4127 len = btrfs_file_extent_num_bytes(src, extent); 4128 *last_extent = key.offset + len; 4129 } 4130 } 4131 fill_holes: 4132 /* So we did prev_leaf, now we need to move to the next leaf, but a few 4133 * things could have happened 4134 * 4135 * 1) A merge could have happened, so we could currently be on a leaf 4136 * that holds what we were copying in the first place. 4137 * 2) A split could have happened, and now not all of the items we want 4138 * are on the same leaf. 4139 * 4140 * So we need to adjust how we search for holes, we need to drop the 4141 * path and re-search for the first extent key we found, and then walk 4142 * forward until we hit the last one we copied. 4143 */ 4144 if (need_find_last_extent) { 4145 /* btrfs_prev_leaf could return 1 without releasing the path */ 4146 btrfs_release_path(src_path); 4147 ret = btrfs_search_slot(NULL, inode->root, &first_key, 4148 src_path, 0, 0); 4149 if (ret < 0) 4150 return ret; 4151 ASSERT(ret == 0); 4152 src = src_path->nodes[0]; 4153 i = src_path->slots[0]; 4154 } else { 4155 i = start_slot; 4156 } 4157 4158 /* 4159 * Ok so here we need to go through and fill in any holes we may have 4160 * to make sure that holes are punched for those areas in case they had 4161 * extents previously. 4162 */ 4163 while (!done) { 4164 u64 offset, len; 4165 u64 extent_end; 4166 4167 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 4168 ret = btrfs_next_leaf(inode->root, src_path); 4169 if (ret < 0) 4170 return ret; 4171 ASSERT(ret == 0); 4172 src = src_path->nodes[0]; 4173 i = 0; 4174 need_find_last_extent = true; 4175 } 4176 4177 btrfs_item_key_to_cpu(src, &key, i); 4178 if (!btrfs_comp_cpu_keys(&key, &last_key)) 4179 done = true; 4180 if (key.objectid != btrfs_ino(inode) || 4181 key.type != BTRFS_EXTENT_DATA_KEY) { 4182 i++; 4183 continue; 4184 } 4185 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 4186 if (btrfs_file_extent_type(src, extent) == 4187 BTRFS_FILE_EXTENT_INLINE) { 4188 len = btrfs_file_extent_ram_bytes(src, extent); 4189 extent_end = ALIGN(key.offset + len, 4190 fs_info->sectorsize); 4191 } else { 4192 len = btrfs_file_extent_num_bytes(src, extent); 4193 extent_end = key.offset + len; 4194 } 4195 i++; 4196 4197 if (*last_extent == key.offset) { 4198 *last_extent = extent_end; 4199 continue; 4200 } 4201 offset = *last_extent; 4202 len = key.offset - *last_extent; 4203 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 4204 offset, 0, 0, len, 0, len, 0, 0, 0); 4205 if (ret) 4206 break; 4207 *last_extent = extent_end; 4208 } 4209 4210 /* 4211 * Check if there is a hole between the last extent found in our leaf 4212 * and the first extent in the next leaf. If there is one, we need to 4213 * log an explicit hole so that at replay time we can punch the hole. 
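* The hole is logged below as a file extent item with a disk_bytenr of 0, which is the same way holes are represented in the fs/subvolume tree.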
4214 */ 4215 if (ret == 0 && 4216 key.objectid == btrfs_ino(inode) && 4217 key.type == BTRFS_EXTENT_DATA_KEY && 4218 i == btrfs_header_nritems(src_path->nodes[0])) { 4219 ret = btrfs_next_leaf(inode->root, src_path); 4220 need_find_last_extent = true; 4221 if (ret > 0) { 4222 ret = 0; 4223 } else if (ret == 0) { 4224 btrfs_item_key_to_cpu(src_path->nodes[0], &key, 4225 src_path->slots[0]); 4226 if (key.objectid == btrfs_ino(inode) && 4227 key.type == BTRFS_EXTENT_DATA_KEY && 4228 *last_extent < key.offset) { 4229 const u64 len = key.offset - *last_extent; 4230 4231 ret = btrfs_insert_file_extent(trans, log, 4232 btrfs_ino(inode), 4233 *last_extent, 0, 4234 0, len, 0, len, 4235 0, 0, 0); 4236 *last_extent += len; 4237 } 4238 } 4239 } 4240 /* 4241 * Need to let the callers know we dropped the path so they should 4242 * re-search. 4243 */ 4244 if (!ret && need_find_last_extent) 4245 ret = 1; 4246 return ret; 4247 } 4248 4249 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 4250 { 4251 struct extent_map *em1, *em2; 4252 4253 em1 = list_entry(a, struct extent_map, list); 4254 em2 = list_entry(b, struct extent_map, list); 4255 4256 if (em1->start < em2->start) 4257 return -1; 4258 else if (em1->start > em2->start) 4259 return 1; 4260 return 0; 4261 } 4262 4263 static int log_extent_csums(struct btrfs_trans_handle *trans, 4264 struct btrfs_inode *inode, 4265 struct btrfs_root *log_root, 4266 const struct extent_map *em) 4267 { 4268 u64 csum_offset; 4269 u64 csum_len; 4270 LIST_HEAD(ordered_sums); 4271 int ret = 0; 4272 4273 if (inode->flags & BTRFS_INODE_NODATASUM || 4274 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 4275 em->block_start == EXTENT_MAP_HOLE) 4276 return 0; 4277 4278 /* If we're compressed we have to save the entire range of csums. */ 4279 if (em->compress_type) { 4280 csum_offset = 0; 4281 csum_len = max(em->block_len, em->orig_block_len); 4282 } else { 4283 csum_offset = em->mod_start - em->start; 4284 csum_len = em->mod_len; 4285 } 4286 4287 /* block start is already adjusted for the file extent offset. 
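* so adding csum_offset gives the disk byte range whose checksums have to be copied into the log tree.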
*/ 4288 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root, 4289 em->block_start + csum_offset, 4290 em->block_start + csum_offset + 4291 csum_len - 1, &ordered_sums, 0); 4292 if (ret) 4293 return ret; 4294 4295 while (!list_empty(&ordered_sums)) { 4296 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4297 struct btrfs_ordered_sum, 4298 list); 4299 if (!ret) 4300 ret = log_csums(trans, log_root, sums); 4301 list_del(&sums->list); 4302 kfree(sums); 4303 } 4304 4305 return ret; 4306 } 4307 4308 static int log_one_extent(struct btrfs_trans_handle *trans, 4309 struct btrfs_inode *inode, struct btrfs_root *root, 4310 const struct extent_map *em, 4311 struct btrfs_path *path, 4312 struct btrfs_log_ctx *ctx) 4313 { 4314 struct btrfs_root *log = root->log_root; 4315 struct btrfs_file_extent_item *fi; 4316 struct extent_buffer *leaf; 4317 struct btrfs_map_token token; 4318 struct btrfs_key key; 4319 u64 extent_offset = em->start - em->orig_start; 4320 u64 block_len; 4321 int ret; 4322 int extent_inserted = 0; 4323 4324 ret = log_extent_csums(trans, inode, log, em); 4325 if (ret) 4326 return ret; 4327 4328 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, 4329 em->start + em->len, NULL, 0, 1, 4330 sizeof(*fi), &extent_inserted); 4331 if (ret) 4332 return ret; 4333 4334 if (!extent_inserted) { 4335 key.objectid = btrfs_ino(inode); 4336 key.type = BTRFS_EXTENT_DATA_KEY; 4337 key.offset = em->start; 4338 4339 ret = btrfs_insert_empty_item(trans, log, path, &key, 4340 sizeof(*fi)); 4341 if (ret) 4342 return ret; 4343 } 4344 leaf = path->nodes[0]; 4345 btrfs_init_map_token(&token, leaf); 4346 fi = btrfs_item_ptr(leaf, path->slots[0], 4347 struct btrfs_file_extent_item); 4348 4349 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4350 &token); 4351 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4352 btrfs_set_token_file_extent_type(leaf, fi, 4353 BTRFS_FILE_EXTENT_PREALLOC, 4354 &token); 4355 else 4356 btrfs_set_token_file_extent_type(leaf, fi, 4357 BTRFS_FILE_EXTENT_REG, 4358 &token); 4359 4360 block_len = max(em->block_len, em->orig_block_len); 4361 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4362 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4363 em->block_start, 4364 &token); 4365 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4366 &token); 4367 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4368 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4369 em->block_start - 4370 extent_offset, &token); 4371 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4372 &token); 4373 } else { 4374 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4375 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4376 &token); 4377 } 4378 4379 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4380 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4381 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4382 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4383 &token); 4384 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4385 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4386 btrfs_mark_buffer_dirty(leaf); 4387 4388 btrfs_release_path(path); 4389 4390 return ret; 4391 } 4392 4393 /* 4394 * Log all prealloc extents beyond the inode's i_size to make sure we do not 4395 * lose them after doing a fast fsync and replaying the log. 
We scan the 4396 * subvolume's root instead of iterating the inode's extent map tree because 4397 * otherwise we can log incorrect extent items based on extent map conversion. 4398 * That can happen due to the fact that extent maps are merged when they 4399 * are not in the extent map tree's list of modified extents. 4400 */ 4401 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, 4402 struct btrfs_inode *inode, 4403 struct btrfs_path *path) 4404 { 4405 struct btrfs_root *root = inode->root; 4406 struct btrfs_key key; 4407 const u64 i_size = i_size_read(&inode->vfs_inode); 4408 const u64 ino = btrfs_ino(inode); 4409 struct btrfs_path *dst_path = NULL; 4410 u64 last_extent = (u64)-1; 4411 int ins_nr = 0; 4412 int start_slot; 4413 int ret; 4414 4415 if (!(inode->flags & BTRFS_INODE_PREALLOC)) 4416 return 0; 4417 4418 key.objectid = ino; 4419 key.type = BTRFS_EXTENT_DATA_KEY; 4420 key.offset = i_size; 4421 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4422 if (ret < 0) 4423 goto out; 4424 4425 while (true) { 4426 struct extent_buffer *leaf = path->nodes[0]; 4427 int slot = path->slots[0]; 4428 4429 if (slot >= btrfs_header_nritems(leaf)) { 4430 if (ins_nr > 0) { 4431 ret = copy_items(trans, inode, dst_path, path, 4432 &last_extent, start_slot, 4433 ins_nr, 1, 0); 4434 if (ret < 0) 4435 goto out; 4436 ins_nr = 0; 4437 } 4438 ret = btrfs_next_leaf(root, path); 4439 if (ret < 0) 4440 goto out; 4441 if (ret > 0) { 4442 ret = 0; 4443 break; 4444 } 4445 continue; 4446 } 4447 4448 btrfs_item_key_to_cpu(leaf, &key, slot); 4449 if (key.objectid > ino) 4450 break; 4451 if (WARN_ON_ONCE(key.objectid < ino) || 4452 key.type < BTRFS_EXTENT_DATA_KEY || 4453 key.offset < i_size) { 4454 path->slots[0]++; 4455 continue; 4456 } 4457 if (last_extent == (u64)-1) { 4458 last_extent = key.offset; 4459 /* 4460 * Avoid logging extent items logged in past fsync calls 4461 * and leading to duplicate keys in the log tree. 4462 */ 4463 do { 4464 ret = btrfs_truncate_inode_items(trans, 4465 root->log_root, 4466 &inode->vfs_inode, 4467 i_size, 4468 BTRFS_EXTENT_DATA_KEY); 4469 } while (ret == -EAGAIN); 4470 if (ret) 4471 goto out; 4472 } 4473 if (ins_nr == 0) 4474 start_slot = slot; 4475 ins_nr++; 4476 path->slots[0]++; 4477 if (!dst_path) { 4478 dst_path = btrfs_alloc_path(); 4479 if (!dst_path) { 4480 ret = -ENOMEM; 4481 goto out; 4482 } 4483 } 4484 } 4485 if (ins_nr > 0) { 4486 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4487 start_slot, ins_nr, 1, 0); 4488 if (ret > 0) 4489 ret = 0; 4490 } 4491 out: 4492 btrfs_release_path(path); 4493 btrfs_free_path(dst_path); 4494 return ret; 4495 } 4496 4497 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4498 struct btrfs_root *root, 4499 struct btrfs_inode *inode, 4500 struct btrfs_path *path, 4501 struct btrfs_log_ctx *ctx, 4502 const u64 start, 4503 const u64 end) 4504 { 4505 struct extent_map *em, *n; 4506 struct list_head extents; 4507 struct extent_map_tree *tree = &inode->extent_tree; 4508 u64 test_gen; 4509 int ret = 0; 4510 int num = 0; 4511 4512 INIT_LIST_HEAD(&extents); 4513 4514 write_lock(&tree->lock); 4515 test_gen = root->fs_info->last_trans_committed; 4516 4517 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4518 /* 4519 * Skip extents outside our logging range. 
It's important to do 4520 * it for correctness because if we don't ignore them, we may 4521 * log them before their ordered extent completes, and therefore 4522 * we could log them without logging their respective checksums 4523 * (the checksum items are added to the csum tree at the very 4524 * end of btrfs_finish_ordered_io()). Also leave such extents 4525 * outside of our range in the list, since we may have another 4526 * ranged fsync in the near future that needs them. If an extent 4527 * outside our range corresponds to a hole, log it to avoid 4528 * leaving gaps between extents (fsck will complain when we are 4529 * not using the NO_HOLES feature). 4530 */ 4531 if ((em->start > end || em->start + em->len <= start) && 4532 em->block_start != EXTENT_MAP_HOLE) 4533 continue; 4534 4535 list_del_init(&em->list); 4536 /* 4537 * Just an arbitrary number, this can be really CPU intensive 4538 * once we start getting a lot of extents, and really once we 4539 * have a bunch of extents we just want to commit since it will 4540 * be faster. 4541 */ 4542 if (++num > 32768) { 4543 list_del_init(&tree->modified_extents); 4544 ret = -EFBIG; 4545 goto process; 4546 } 4547 4548 if (em->generation <= test_gen) 4549 continue; 4550 4551 /* We log prealloc extents beyond eof later. */ 4552 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && 4553 em->start >= i_size_read(&inode->vfs_inode)) 4554 continue; 4555 4556 /* Need a ref to keep it from getting evicted from cache */ 4557 refcount_inc(&em->refs); 4558 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4559 list_add_tail(&em->list, &extents); 4560 num++; 4561 } 4562 4563 list_sort(NULL, &extents, extent_cmp); 4564 process: 4565 while (!list_empty(&extents)) { 4566 em = list_entry(extents.next, struct extent_map, list); 4567 4568 list_del_init(&em->list); 4569 4570 /* 4571 * If we had an error we just need to delete everybody from our 4572 * private list. 4573 */ 4574 if (ret) { 4575 clear_em_logging(tree, em); 4576 free_extent_map(em); 4577 continue; 4578 } 4579 4580 write_unlock(&tree->lock); 4581 4582 ret = log_one_extent(trans, inode, root, em, path, ctx); 4583 write_lock(&tree->lock); 4584 clear_em_logging(tree, em); 4585 free_extent_map(em); 4586 } 4587 WARN_ON(!list_empty(&extents)); 4588 write_unlock(&tree->lock); 4589 4590 btrfs_release_path(path); 4591 if (!ret) 4592 ret = btrfs_log_prealloc_extents(trans, inode, path); 4593 4594 return ret; 4595 } 4596 4597 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, 4598 struct btrfs_path *path, u64 *size_ret) 4599 { 4600 struct btrfs_key key; 4601 int ret; 4602 4603 key.objectid = btrfs_ino(inode); 4604 key.type = BTRFS_INODE_ITEM_KEY; 4605 key.offset = 0; 4606 4607 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); 4608 if (ret < 0) { 4609 return ret; 4610 } else if (ret > 0) { 4611 *size_ret = 0; 4612 } else { 4613 struct btrfs_inode_item *item; 4614 4615 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4616 struct btrfs_inode_item); 4617 *size_ret = btrfs_inode_size(path->nodes[0], item); 4618 /* 4619 * If the in-memory inode's i_size is smaller then the inode 4620 * size stored in the btree, return the inode's i_size, so 4621 * that we get a correct inode size after replaying the log 4622 * when before a power failure we had a shrinking truncate 4623 * followed by addition of a new name (rename / new hard link). 
4624 * Otherwise return the inode size from the btree, to avoid 4625 * data loss when replaying a log due to previously doing a 4626 * write that expands the inode's size and logging a new name 4627 * immediately after. 4628 */ 4629 if (*size_ret > inode->vfs_inode.i_size) 4630 *size_ret = inode->vfs_inode.i_size; 4631 } 4632 4633 btrfs_release_path(path); 4634 return 0; 4635 } 4636 4637 /* 4638 * At the moment we always log all xattrs. This is to figure out at log replay 4639 * time which xattrs must have their deletion replayed. If a xattr is missing 4640 * in the log tree and exists in the fs/subvol tree, we delete it. This is 4641 * because if a xattr is deleted, the inode is fsynced and a power failure 4642 * happens, causing the log to be replayed the next time the fs is mounted, 4643 * we want the xattr to not exist anymore (same behaviour as other filesystems 4644 * with a journal, ext3/4, xfs, f2fs, etc). 4645 */ 4646 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 4647 struct btrfs_root *root, 4648 struct btrfs_inode *inode, 4649 struct btrfs_path *path, 4650 struct btrfs_path *dst_path) 4651 { 4652 int ret; 4653 struct btrfs_key key; 4654 const u64 ino = btrfs_ino(inode); 4655 int ins_nr = 0; 4656 int start_slot = 0; 4657 4658 key.objectid = ino; 4659 key.type = BTRFS_XATTR_ITEM_KEY; 4660 key.offset = 0; 4661 4662 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4663 if (ret < 0) 4664 return ret; 4665 4666 while (true) { 4667 int slot = path->slots[0]; 4668 struct extent_buffer *leaf = path->nodes[0]; 4669 int nritems = btrfs_header_nritems(leaf); 4670 4671 if (slot >= nritems) { 4672 if (ins_nr > 0) { 4673 u64 last_extent = 0; 4674 4675 ret = copy_items(trans, inode, dst_path, path, 4676 &last_extent, start_slot, 4677 ins_nr, 1, 0); 4678 /* can't be 1, extent items aren't processed */ 4679 ASSERT(ret <= 0); 4680 if (ret < 0) 4681 return ret; 4682 ins_nr = 0; 4683 } 4684 ret = btrfs_next_leaf(root, path); 4685 if (ret < 0) 4686 return ret; 4687 else if (ret > 0) 4688 break; 4689 continue; 4690 } 4691 4692 btrfs_item_key_to_cpu(leaf, &key, slot); 4693 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 4694 break; 4695 4696 if (ins_nr == 0) 4697 start_slot = slot; 4698 ins_nr++; 4699 path->slots[0]++; 4700 cond_resched(); 4701 } 4702 if (ins_nr > 0) { 4703 u64 last_extent = 0; 4704 4705 ret = copy_items(trans, inode, dst_path, path, 4706 &last_extent, start_slot, 4707 ins_nr, 1, 0); 4708 /* can't be 1, extent items aren't processed */ 4709 ASSERT(ret <= 0); 4710 if (ret < 0) 4711 return ret; 4712 } 4713 4714 return 0; 4715 } 4716 4717 /* 4718 * If the no holes feature is enabled we need to make sure any hole between the 4719 * last extent and the i_size of our inode is explicitly marked in the log. This 4720 * is to make sure that doing something like: 4721 * 4722 * 1) create file with 128Kb of data 4723 * 2) truncate file to 64Kb 4724 * 3) truncate file to 256Kb 4725 * 4) fsync file 4726 * 5) <crash/power failure> 4727 * 6) mount fs and trigger log replay 4728 * 4729 * Will give us a file with a size of 256Kb, the first 64Kb of data match what 4730 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the 4731 * file correspond to a hole. The presence of explicit holes in a log tree is 4732 * what guarantees that log replay will remove/adjust file extent items in the 4733 * fs/subvol tree. 4734 * 4735 * Here we do not need to care about holes between extents, that is already done 4736 * by copy_items(). 
We also only need to do this in the full sync path, where we 4737 * lookup for extents from the fs/subvol tree only. In the fast path case, we 4738 * lookup the list of modified extent maps and if any represents a hole, we 4739 * insert a corresponding extent representing a hole in the log tree. 4740 */ 4741 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, 4742 struct btrfs_root *root, 4743 struct btrfs_inode *inode, 4744 struct btrfs_path *path) 4745 { 4746 struct btrfs_fs_info *fs_info = root->fs_info; 4747 int ret; 4748 struct btrfs_key key; 4749 u64 hole_start; 4750 u64 hole_size; 4751 struct extent_buffer *leaf; 4752 struct btrfs_root *log = root->log_root; 4753 const u64 ino = btrfs_ino(inode); 4754 const u64 i_size = i_size_read(&inode->vfs_inode); 4755 4756 if (!btrfs_fs_incompat(fs_info, NO_HOLES)) 4757 return 0; 4758 4759 key.objectid = ino; 4760 key.type = BTRFS_EXTENT_DATA_KEY; 4761 key.offset = (u64)-1; 4762 4763 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4764 ASSERT(ret != 0); 4765 if (ret < 0) 4766 return ret; 4767 4768 ASSERT(path->slots[0] > 0); 4769 path->slots[0]--; 4770 leaf = path->nodes[0]; 4771 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4772 4773 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { 4774 /* inode does not have any extents */ 4775 hole_start = 0; 4776 hole_size = i_size; 4777 } else { 4778 struct btrfs_file_extent_item *extent; 4779 u64 len; 4780 4781 /* 4782 * If there's an extent beyond i_size, an explicit hole was 4783 * already inserted by copy_items(). 4784 */ 4785 if (key.offset >= i_size) 4786 return 0; 4787 4788 extent = btrfs_item_ptr(leaf, path->slots[0], 4789 struct btrfs_file_extent_item); 4790 4791 if (btrfs_file_extent_type(leaf, extent) == 4792 BTRFS_FILE_EXTENT_INLINE) 4793 return 0; 4794 4795 len = btrfs_file_extent_num_bytes(leaf, extent); 4796 /* Last extent goes beyond i_size, no need to log a hole. */ 4797 if (key.offset + len > i_size) 4798 return 0; 4799 hole_start = key.offset + len; 4800 hole_size = i_size - hole_start; 4801 } 4802 btrfs_release_path(path); 4803 4804 /* Last extent ends at i_size. */ 4805 if (hole_size == 0) 4806 return 0; 4807 4808 hole_size = ALIGN(hole_size, fs_info->sectorsize); 4809 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, 4810 hole_size, 0, hole_size, 0, 0, 0); 4811 return ret; 4812 } 4813 4814 /* 4815 * When we are logging a new inode X, check if it doesn't have a reference that 4816 * matches the reference from some other inode Y created in a past transaction 4817 * and that was renamed in the current transaction. If we don't do this, then at 4818 * log replay time we can lose inode Y (and all its files if it's a directory): 4819 * 4820 * mkdir /mnt/x 4821 * echo "hello world" > /mnt/x/foobar 4822 * sync 4823 * mv /mnt/x /mnt/y 4824 * mkdir /mnt/x # or touch /mnt/x 4825 * xfs_io -c fsync /mnt/x 4826 * <power fail> 4827 * mount fs, trigger log replay 4828 * 4829 * After the log replay procedure, we would lose the first directory and all its 4830 * files (file foobar). 
4831 * For the case where inode Y is not a directory we simply end up losing it: 4832 * 4833 * echo "123" > /mnt/foo 4834 * sync 4835 * mv /mnt/foo /mnt/bar 4836 * echo "abc" > /mnt/foo 4837 * xfs_io -c fsync /mnt/foo 4838 * <power fail> 4839 * 4840 * We also need this for cases where a snapshot entry is replaced by some other 4841 * entry (file or directory) otherwise we end up with an unreplayable log due to 4842 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 4843 * if it were a regular entry: 4844 * 4845 * mkdir /mnt/x 4846 * btrfs subvolume snapshot /mnt /mnt/x/snap 4847 * btrfs subvolume delete /mnt/x/snap 4848 * rmdir /mnt/x 4849 * mkdir /mnt/x 4850 * fsync /mnt/x or fsync some new file inside it 4851 * <power fail> 4852 * 4853 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 4854 * the same transaction. 4855 */ 4856 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4857 const int slot, 4858 const struct btrfs_key *key, 4859 struct btrfs_inode *inode, 4860 u64 *other_ino, u64 *other_parent) 4861 { 4862 int ret; 4863 struct btrfs_path *search_path; 4864 char *name = NULL; 4865 u32 name_len = 0; 4866 u32 item_size = btrfs_item_size_nr(eb, slot); 4867 u32 cur_offset = 0; 4868 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4869 4870 search_path = btrfs_alloc_path(); 4871 if (!search_path) 4872 return -ENOMEM; 4873 search_path->search_commit_root = 1; 4874 search_path->skip_locking = 1; 4875 4876 while (cur_offset < item_size) { 4877 u64 parent; 4878 u32 this_name_len; 4879 u32 this_len; 4880 unsigned long name_ptr; 4881 struct btrfs_dir_item *di; 4882 4883 if (key->type == BTRFS_INODE_REF_KEY) { 4884 struct btrfs_inode_ref *iref; 4885 4886 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4887 parent = key->offset; 4888 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4889 name_ptr = (unsigned long)(iref + 1); 4890 this_len = sizeof(*iref) + this_name_len; 4891 } else { 4892 struct btrfs_inode_extref *extref; 4893 4894 extref = (struct btrfs_inode_extref *)(ptr + 4895 cur_offset); 4896 parent = btrfs_inode_extref_parent(eb, extref); 4897 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4898 name_ptr = (unsigned long)&extref->name; 4899 this_len = sizeof(*extref) + this_name_len; 4900 } 4901 4902 if (this_name_len > name_len) { 4903 char *new_name; 4904 4905 new_name = krealloc(name, this_name_len, GFP_NOFS); 4906 if (!new_name) { 4907 ret = -ENOMEM; 4908 goto out; 4909 } 4910 name_len = this_name_len; 4911 name = new_name; 4912 } 4913 4914 read_extent_buffer(eb, name, name_ptr, this_name_len); 4915 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, 4916 parent, name, this_name_len, 0); 4917 if (di && !IS_ERR(di)) { 4918 struct btrfs_key di_key; 4919 4920 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4921 di, &di_key); 4922 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4923 if (di_key.objectid != key->objectid) { 4924 ret = 1; 4925 *other_ino = di_key.objectid; 4926 *other_parent = parent; 4927 } else { 4928 ret = 0; 4929 } 4930 } else { 4931 ret = -EAGAIN; 4932 } 4933 goto out; 4934 } else if (IS_ERR(di)) { 4935 ret = PTR_ERR(di); 4936 goto out; 4937 } 4938 btrfs_release_path(search_path); 4939 4940 cur_offset += this_len; 4941 } 4942 ret = 0; 4943 out: 4944 btrfs_free_path(search_path); 4945 kfree(name); 4946 return ret; 4947 } 4948 4949 struct btrfs_ino_list { 4950 u64 ino; 4951 u64 parent; 4952 struct list_head list; 4953 }; 4954 4955 static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans, 4956 struct btrfs_root *root, 4957 struct btrfs_path *path, 4958 struct btrfs_log_ctx *ctx, 4959 u64 ino, u64 parent) 4960 { 4961 struct btrfs_ino_list *ino_elem; 4962 LIST_HEAD(inode_list); 4963 int ret = 0; 4964 4965 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 4966 if (!ino_elem) 4967 return -ENOMEM; 4968 ino_elem->ino = ino; 4969 ino_elem->parent = parent; 4970 list_add_tail(&ino_elem->list, &inode_list); 4971 4972 while (!list_empty(&inode_list)) { 4973 struct btrfs_fs_info *fs_info = root->fs_info; 4974 struct btrfs_key key; 4975 struct inode *inode; 4976 4977 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list, 4978 list); 4979 ino = ino_elem->ino; 4980 parent = ino_elem->parent; 4981 list_del(&ino_elem->list); 4982 kfree(ino_elem); 4983 if (ret) 4984 continue; 4985 4986 btrfs_release_path(path); 4987 4988 key.objectid = ino; 4989 key.type = BTRFS_INODE_ITEM_KEY; 4990 key.offset = 0; 4991 inode = btrfs_iget(fs_info->sb, &key, root); 4992 /* 4993 * If the other inode that had a conflicting dir entry was 4994 * deleted in the current transaction, we need to log its parent 4995 * directory. 4996 */ 4997 if (IS_ERR(inode)) { 4998 ret = PTR_ERR(inode); 4999 if (ret == -ENOENT) { 5000 key.objectid = parent; 5001 inode = btrfs_iget(fs_info->sb, &key, root); 5002 if (IS_ERR(inode)) { 5003 ret = PTR_ERR(inode); 5004 } else { 5005 ret = btrfs_log_inode(trans, root, 5006 BTRFS_I(inode), 5007 LOG_OTHER_INODE_ALL, 5008 0, LLONG_MAX, ctx); 5009 btrfs_add_delayed_iput(inode); 5010 } 5011 } 5012 continue; 5013 } 5014 /* 5015 * We are safe logging the other inode without acquiring its 5016 * lock as long as we log with the LOG_INODE_EXISTS mode. We 5017 * are safe against concurrent renames of the other inode as 5018 * well because during a rename we pin the log and update the 5019 * log with the new name before we unpin it. 5020 */ 5021 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5022 LOG_OTHER_INODE, 0, LLONG_MAX, ctx); 5023 if (ret) { 5024 btrfs_add_delayed_iput(inode); 5025 continue; 5026 } 5027 5028 key.objectid = ino; 5029 key.type = BTRFS_INODE_REF_KEY; 5030 key.offset = 0; 5031 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5032 if (ret < 0) { 5033 btrfs_add_delayed_iput(inode); 5034 continue; 5035 } 5036 5037 while (true) { 5038 struct extent_buffer *leaf = path->nodes[0]; 5039 int slot = path->slots[0]; 5040 u64 other_ino = 0; 5041 u64 other_parent = 0; 5042 5043 if (slot >= btrfs_header_nritems(leaf)) { 5044 ret = btrfs_next_leaf(root, path); 5045 if (ret < 0) { 5046 break; 5047 } else if (ret > 0) { 5048 ret = 0; 5049 break; 5050 } 5051 continue; 5052 } 5053 5054 btrfs_item_key_to_cpu(leaf, &key, slot); 5055 if (key.objectid != ino || 5056 (key.type != BTRFS_INODE_REF_KEY && 5057 key.type != BTRFS_INODE_EXTREF_KEY)) { 5058 ret = 0; 5059 break; 5060 } 5061 5062 ret = btrfs_check_ref_name_override(leaf, slot, &key, 5063 BTRFS_I(inode), &other_ino, 5064 &other_parent); 5065 if (ret < 0) 5066 break; 5067 if (ret > 0) { 5068 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 5069 if (!ino_elem) { 5070 ret = -ENOMEM; 5071 break; 5072 } 5073 ino_elem->ino = other_ino; 5074 ino_elem->parent = other_parent; 5075 list_add_tail(&ino_elem->list, &inode_list); 5076 ret = 0; 5077 } 5078 path->slots[0]++; 5079 } 5080 btrfs_add_delayed_iput(inode); 5081 } 5082 5083 return ret; 5084 } 5085 5086 /* log a single inode in the tree log. 5087 * At least one parent directory for this inode must exist in the tree 5088 * or be logged already. 
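* (btrfs_log_inode_parent() is the helper responsible for making sure such parent directories end up in the log when needed.)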
5089 * 5090 * Any items from this inode changed by the current transaction are copied 5091 * to the log tree. An extra reference is taken on any extents in this 5092 * file, allowing us to avoid a whole pile of corner cases around logging 5093 * blocks that have been removed from the tree. 5094 * 5095 * See LOG_INODE_ALL and related defines for a description of what inode_only 5096 * does. 5097 * 5098 * This handles both files and directories. 5099 */ 5100 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 5101 struct btrfs_root *root, struct btrfs_inode *inode, 5102 int inode_only, 5103 const loff_t start, 5104 const loff_t end, 5105 struct btrfs_log_ctx *ctx) 5106 { 5107 struct btrfs_fs_info *fs_info = root->fs_info; 5108 struct btrfs_path *path; 5109 struct btrfs_path *dst_path; 5110 struct btrfs_key min_key; 5111 struct btrfs_key max_key; 5112 struct btrfs_root *log = root->log_root; 5113 u64 last_extent = 0; 5114 int err = 0; 5115 int ret; 5116 int nritems; 5117 int ins_start_slot = 0; 5118 int ins_nr; 5119 bool fast_search = false; 5120 u64 ino = btrfs_ino(inode); 5121 struct extent_map_tree *em_tree = &inode->extent_tree; 5122 u64 logged_isize = 0; 5123 bool need_log_inode_item = true; 5124 bool xattrs_logged = false; 5125 bool recursive_logging = false; 5126 5127 path = btrfs_alloc_path(); 5128 if (!path) 5129 return -ENOMEM; 5130 dst_path = btrfs_alloc_path(); 5131 if (!dst_path) { 5132 btrfs_free_path(path); 5133 return -ENOMEM; 5134 } 5135 5136 min_key.objectid = ino; 5137 min_key.type = BTRFS_INODE_ITEM_KEY; 5138 min_key.offset = 0; 5139 5140 max_key.objectid = ino; 5141 5142 5143 /* today the code can only do partial logging of directories */ 5144 if (S_ISDIR(inode->vfs_inode.i_mode) || 5145 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5146 &inode->runtime_flags) && 5147 inode_only >= LOG_INODE_EXISTS)) 5148 max_key.type = BTRFS_XATTR_ITEM_KEY; 5149 else 5150 max_key.type = (u8)-1; 5151 max_key.offset = (u64)-1; 5152 5153 /* 5154 * Only run delayed items if we are a dir or a new file. 5155 * Otherwise commit the delayed inode only, which is needed in 5156 * order for the log replay code to mark inodes for link count 5157 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 5158 */ 5159 if (S_ISDIR(inode->vfs_inode.i_mode) || 5160 inode->generation > fs_info->last_trans_committed) 5161 ret = btrfs_commit_inode_delayed_items(trans, inode); 5162 else 5163 ret = btrfs_commit_inode_delayed_inode(inode); 5164 5165 if (ret) { 5166 btrfs_free_path(path); 5167 btrfs_free_path(dst_path); 5168 return ret; 5169 } 5170 5171 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) { 5172 recursive_logging = true; 5173 if (inode_only == LOG_OTHER_INODE) 5174 inode_only = LOG_INODE_EXISTS; 5175 else 5176 inode_only = LOG_INODE_ALL; 5177 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING); 5178 } else { 5179 mutex_lock(&inode->log_mutex); 5180 } 5181 5182 /* 5183 * a brute force approach to making sure we get the most uptodate 5184 * copies of everything. 5185 */ 5186 if (S_ISDIR(inode->vfs_inode.i_mode)) { 5187 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 5188 5189 if (inode_only == LOG_INODE_EXISTS) 5190 max_key_type = BTRFS_XATTR_ITEM_KEY; 5191 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 5192 } else { 5193 if (inode_only == LOG_INODE_EXISTS) { 5194 /* 5195 * Make sure the new inode item we write to the log has 5196 * the same isize as the current one (if it exists). 
5197 * This is necessary to prevent data loss after log 5198 * replay, and also to prevent doing a wrong expanding 5199 * truncate - for e.g. create file, write 4K into offset 5200 * 0, fsync, write 4K into offset 4096, add hard link, 5201 * fsync some other file (to sync log), power fail - if 5202 * we use the inode's current i_size, after log replay 5203 * we get a 8Kb file, with the last 4Kb extent as a hole 5204 * (zeroes), as if an expanding truncate happened, 5205 * instead of getting a file of 4Kb only. 5206 */ 5207 err = logged_inode_size(log, inode, path, &logged_isize); 5208 if (err) 5209 goto out_unlock; 5210 } 5211 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5212 &inode->runtime_flags)) { 5213 if (inode_only == LOG_INODE_EXISTS) { 5214 max_key.type = BTRFS_XATTR_ITEM_KEY; 5215 ret = drop_objectid_items(trans, log, path, ino, 5216 max_key.type); 5217 } else { 5218 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5219 &inode->runtime_flags); 5220 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 5221 &inode->runtime_flags); 5222 while(1) { 5223 ret = btrfs_truncate_inode_items(trans, 5224 log, &inode->vfs_inode, 0, 0); 5225 if (ret != -EAGAIN) 5226 break; 5227 } 5228 } 5229 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 5230 &inode->runtime_flags) || 5231 inode_only == LOG_INODE_EXISTS) { 5232 if (inode_only == LOG_INODE_ALL) 5233 fast_search = true; 5234 max_key.type = BTRFS_XATTR_ITEM_KEY; 5235 ret = drop_objectid_items(trans, log, path, ino, 5236 max_key.type); 5237 } else { 5238 if (inode_only == LOG_INODE_ALL) 5239 fast_search = true; 5240 goto log_extents; 5241 } 5242 5243 } 5244 if (ret) { 5245 err = ret; 5246 goto out_unlock; 5247 } 5248 5249 while (1) { 5250 ins_nr = 0; 5251 ret = btrfs_search_forward(root, &min_key, 5252 path, trans->transid); 5253 if (ret < 0) { 5254 err = ret; 5255 goto out_unlock; 5256 } 5257 if (ret != 0) 5258 break; 5259 again: 5260 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 5261 if (min_key.objectid != ino) 5262 break; 5263 if (min_key.type > max_key.type) 5264 break; 5265 5266 if (min_key.type == BTRFS_INODE_ITEM_KEY) 5267 need_log_inode_item = false; 5268 5269 if ((min_key.type == BTRFS_INODE_REF_KEY || 5270 min_key.type == BTRFS_INODE_EXTREF_KEY) && 5271 inode->generation == trans->transid && 5272 !recursive_logging) { 5273 u64 other_ino = 0; 5274 u64 other_parent = 0; 5275 5276 ret = btrfs_check_ref_name_override(path->nodes[0], 5277 path->slots[0], &min_key, inode, 5278 &other_ino, &other_parent); 5279 if (ret < 0) { 5280 err = ret; 5281 goto out_unlock; 5282 } else if (ret > 0 && ctx && 5283 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { 5284 if (ins_nr > 0) { 5285 ins_nr++; 5286 } else { 5287 ins_nr = 1; 5288 ins_start_slot = path->slots[0]; 5289 } 5290 ret = copy_items(trans, inode, dst_path, path, 5291 &last_extent, ins_start_slot, 5292 ins_nr, inode_only, 5293 logged_isize); 5294 if (ret < 0) { 5295 err = ret; 5296 goto out_unlock; 5297 } 5298 ins_nr = 0; 5299 5300 err = log_conflicting_inodes(trans, root, path, 5301 ctx, other_ino, other_parent); 5302 if (err) 5303 goto out_unlock; 5304 btrfs_release_path(path); 5305 goto next_key; 5306 } 5307 } 5308 5309 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 5310 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 5311 if (ins_nr == 0) 5312 goto next_slot; 5313 ret = copy_items(trans, inode, dst_path, path, 5314 &last_extent, ins_start_slot, 5315 ins_nr, inode_only, logged_isize); 5316 if (ret < 0) { 5317 err = ret; 5318 goto out_unlock; 5319 } 5320 ins_nr = 0; 5321 if (ret) 
{ 5322 btrfs_release_path(path); 5323 continue; 5324 } 5325 goto next_slot; 5326 } 5327 5328 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 5329 ins_nr++; 5330 goto next_slot; 5331 } else if (!ins_nr) { 5332 ins_start_slot = path->slots[0]; 5333 ins_nr = 1; 5334 goto next_slot; 5335 } 5336 5337 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5338 ins_start_slot, ins_nr, inode_only, 5339 logged_isize); 5340 if (ret < 0) { 5341 err = ret; 5342 goto out_unlock; 5343 } 5344 if (ret) { 5345 ins_nr = 0; 5346 btrfs_release_path(path); 5347 continue; 5348 } 5349 ins_nr = 1; 5350 ins_start_slot = path->slots[0]; 5351 next_slot: 5352 5353 nritems = btrfs_header_nritems(path->nodes[0]); 5354 path->slots[0]++; 5355 if (path->slots[0] < nritems) { 5356 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 5357 path->slots[0]); 5358 goto again; 5359 } 5360 if (ins_nr) { 5361 ret = copy_items(trans, inode, dst_path, path, 5362 &last_extent, ins_start_slot, 5363 ins_nr, inode_only, logged_isize); 5364 if (ret < 0) { 5365 err = ret; 5366 goto out_unlock; 5367 } 5368 ret = 0; 5369 ins_nr = 0; 5370 } 5371 btrfs_release_path(path); 5372 next_key: 5373 if (min_key.offset < (u64)-1) { 5374 min_key.offset++; 5375 } else if (min_key.type < max_key.type) { 5376 min_key.type++; 5377 min_key.offset = 0; 5378 } else { 5379 break; 5380 } 5381 } 5382 if (ins_nr) { 5383 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5384 ins_start_slot, ins_nr, inode_only, 5385 logged_isize); 5386 if (ret < 0) { 5387 err = ret; 5388 goto out_unlock; 5389 } 5390 ret = 0; 5391 ins_nr = 0; 5392 } 5393 5394 btrfs_release_path(path); 5395 btrfs_release_path(dst_path); 5396 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 5397 if (err) 5398 goto out_unlock; 5399 xattrs_logged = true; 5400 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 5401 btrfs_release_path(path); 5402 btrfs_release_path(dst_path); 5403 err = btrfs_log_trailing_hole(trans, root, inode, path); 5404 if (err) 5405 goto out_unlock; 5406 } 5407 log_extents: 5408 btrfs_release_path(path); 5409 btrfs_release_path(dst_path); 5410 if (need_log_inode_item) { 5411 err = log_inode_item(trans, log, dst_path, inode); 5412 if (!err && !xattrs_logged) { 5413 err = btrfs_log_all_xattrs(trans, root, inode, path, 5414 dst_path); 5415 btrfs_release_path(path); 5416 } 5417 if (err) 5418 goto out_unlock; 5419 } 5420 if (fast_search) { 5421 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 5422 ctx, start, end); 5423 if (ret) { 5424 err = ret; 5425 goto out_unlock; 5426 } 5427 } else if (inode_only == LOG_INODE_ALL) { 5428 struct extent_map *em, *n; 5429 5430 write_lock(&em_tree->lock); 5431 /* 5432 * We can't just remove every em if we're called for a ranged 5433 * fsync - that is, one that doesn't cover the whole possible 5434 * file range (0 to LLONG_MAX). This is because we can have 5435 * em's that fall outside the range we're logging and therefore 5436 * their ordered operations haven't completed yet 5437 * (btrfs_finish_ordered_io() not invoked yet). This means we 5438 * didn't get their respective file extent item in the fs/subvol 5439 * tree yet, and need to let the next fast fsync (one which 5440 * consults the list of modified extent maps) find the em so 5441 * that it logs a matching file extent item and waits for the 5442 * respective ordered operation to complete (if it's still 5443 * running). 
5444 * 5445 * Removing every em outside the range we're logging would make 5446 * the next fast fsync not log their matching file extent items, 5447 * therefore making us lose data after a log replay. 5448 */ 5449 list_for_each_entry_safe(em, n, &em_tree->modified_extents, 5450 list) { 5451 const u64 mod_end = em->mod_start + em->mod_len - 1; 5452 5453 if (em->mod_start >= start && mod_end <= end) 5454 list_del_init(&em->list); 5455 } 5456 write_unlock(&em_tree->lock); 5457 } 5458 5459 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { 5460 ret = log_directory_changes(trans, root, inode, path, dst_path, 5461 ctx); 5462 if (ret) { 5463 err = ret; 5464 goto out_unlock; 5465 } 5466 } 5467 5468 /* 5469 * Don't update last_log_commit if we logged that an inode exists after 5470 * it was loaded to memory (full_sync bit set). 5471 * This is to prevent data loss when we do a write to the inode, then 5472 * the inode gets evicted after all delalloc was flushed, then we log 5473 * it exists (due to a rename for example) and then fsync it. This last 5474 * fsync would do nothing (not logging the extents previously written). 5475 */ 5476 spin_lock(&inode->lock); 5477 inode->logged_trans = trans->transid; 5478 if (inode_only != LOG_INODE_EXISTS || 5479 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) 5480 inode->last_log_commit = inode->last_sub_trans; 5481 spin_unlock(&inode->lock); 5482 out_unlock: 5483 mutex_unlock(&inode->log_mutex); 5484 5485 btrfs_free_path(path); 5486 btrfs_free_path(dst_path); 5487 return err; 5488 } 5489 5490 /* 5491 * Check if we must fallback to a transaction commit when logging an inode. 5492 * This must be called after logging the inode and is used only in the context 5493 * when fsyncing an inode requires the need to log some other inode - in which 5494 * case we can't lock the i_mutex of each other inode we need to log as that 5495 * can lead to deadlocks with concurrent fsync against other inodes (as we can 5496 * log inodes up or down in the hierarchy) or rename operations for example. So 5497 * we take the log_mutex of the inode after we have logged it and then check for 5498 * its last_unlink_trans value - this is safe because any task setting 5499 * last_unlink_trans must take the log_mutex and it must do this before it does 5500 * the actual unlink operation, so if we do this check before a concurrent task 5501 * sets last_unlink_trans it means we've logged a consistent version/state of 5502 * all the inode items, otherwise we are not sure and must do a transaction 5503 * commit (the concurrent task might have only updated last_unlink_trans before 5504 * we logged the inode or it might have also done the unlink). 5505 */ 5506 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 5507 struct btrfs_inode *inode) 5508 { 5509 struct btrfs_fs_info *fs_info = inode->root->fs_info; 5510 bool ret = false; 5511 5512 mutex_lock(&inode->log_mutex); 5513 if (inode->last_unlink_trans > fs_info->last_trans_committed) { 5514 /* 5515 * Make sure any commits to the log are forced to be full 5516 * commits. 5517 */ 5518 btrfs_set_log_full_commit(trans); 5519 ret = true; 5520 } 5521 mutex_unlock(&inode->log_mutex); 5522 5523 return ret; 5524 } 5525 5526 /* 5527 * follow the dentry parent pointers up the chain and see if any 5528 * of the directories in it require a full commit before they can 5529 * be logged. Returns zero if nothing special needs to be done or 1 if 5530 * a full commit is required. 
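* The decision is made per ancestor with btrfs_must_commit_transaction(), based on its last_unlink_trans; this function never returns a negative value.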
5531 */ 5532 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 5533 struct btrfs_inode *inode, 5534 struct dentry *parent, 5535 struct super_block *sb, 5536 u64 last_committed) 5537 { 5538 int ret = 0; 5539 struct dentry *old_parent = NULL; 5540 5541 /* 5542 * for regular files, if its inode is already on disk, we don't 5543 * have to worry about the parents at all. This is because 5544 * we can use the last_unlink_trans field to record renames 5545 * and other fun in this file. 5546 */ 5547 if (S_ISREG(inode->vfs_inode.i_mode) && 5548 inode->generation <= last_committed && 5549 inode->last_unlink_trans <= last_committed) 5550 goto out; 5551 5552 if (!S_ISDIR(inode->vfs_inode.i_mode)) { 5553 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5554 goto out; 5555 inode = BTRFS_I(d_inode(parent)); 5556 } 5557 5558 while (1) { 5559 if (btrfs_must_commit_transaction(trans, inode)) { 5560 ret = 1; 5561 break; 5562 } 5563 5564 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5565 break; 5566 5567 if (IS_ROOT(parent)) { 5568 inode = BTRFS_I(d_inode(parent)); 5569 if (btrfs_must_commit_transaction(trans, inode)) 5570 ret = 1; 5571 break; 5572 } 5573 5574 parent = dget_parent(parent); 5575 dput(old_parent); 5576 old_parent = parent; 5577 inode = BTRFS_I(d_inode(parent)); 5578 5579 } 5580 dput(old_parent); 5581 out: 5582 return ret; 5583 } 5584 5585 struct btrfs_dir_list { 5586 u64 ino; 5587 struct list_head list; 5588 }; 5589 5590 /* 5591 * Log the inodes of the new dentries of a directory. See log_dir_items() for 5592 * details about the why it is needed. 5593 * This is a recursive operation - if an existing dentry corresponds to a 5594 * directory, that directory's new entries are logged too (same behaviour as 5595 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes 5596 * the dentries point to we do not lock their i_mutex, otherwise lockdep 5597 * complains about the following circular lock dependency / possible deadlock: 5598 * 5599 * CPU0 CPU1 5600 * ---- ---- 5601 * lock(&type->i_mutex_dir_key#3/2); 5602 * lock(sb_internal#2); 5603 * lock(&type->i_mutex_dir_key#3/2); 5604 * lock(&sb->s_type->i_mutex_key#14); 5605 * 5606 * Where sb_internal is the lock (a counter that works as a lock) acquired by 5607 * sb_start_intwrite() in btrfs_start_transaction(). 5608 * Not locking i_mutex of the inodes is still safe because: 5609 * 5610 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible 5611 * that while logging the inode new references (names) are added or removed 5612 * from the inode, leaving the logged inode item with a link count that does 5613 * not match the number of logged inode reference items. This is fine because 5614 * at log replay time we compute the real number of links and correct the 5615 * link count in the inode item (see replay_one_buffer() and 5616 * link_to_fixup_dir()); 5617 * 5618 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that 5619 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and 5620 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item 5621 * has a size that doesn't match the sum of the lengths of all the logged 5622 * names. This does not result in a problem because if a dir_item key is 5623 * logged but its matching dir_index key is not logged, at log replay time we 5624 * don't use it to replay the respective name (see replay_one_name()). 
On the 5625 * other hand if only the dir_index key ends up being logged, the respective 5626 * name is added to the fs/subvol tree with both the dir_item and dir_index 5627 * keys created (see replay_one_name()). 5628 * The directory's inode item with a wrong i_size is not a problem as well, 5629 * since we don't use it at log replay time to set the i_size in the inode 5630 * item of the fs/subvol tree (see overwrite_item()). 5631 */ 5632 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5633 struct btrfs_root *root, 5634 struct btrfs_inode *start_inode, 5635 struct btrfs_log_ctx *ctx) 5636 { 5637 struct btrfs_fs_info *fs_info = root->fs_info; 5638 struct btrfs_root *log = root->log_root; 5639 struct btrfs_path *path; 5640 LIST_HEAD(dir_list); 5641 struct btrfs_dir_list *dir_elem; 5642 int ret = 0; 5643 5644 path = btrfs_alloc_path(); 5645 if (!path) 5646 return -ENOMEM; 5647 5648 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5649 if (!dir_elem) { 5650 btrfs_free_path(path); 5651 return -ENOMEM; 5652 } 5653 dir_elem->ino = btrfs_ino(start_inode); 5654 list_add_tail(&dir_elem->list, &dir_list); 5655 5656 while (!list_empty(&dir_list)) { 5657 struct extent_buffer *leaf; 5658 struct btrfs_key min_key; 5659 int nritems; 5660 int i; 5661 5662 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, 5663 list); 5664 if (ret) 5665 goto next_dir_inode; 5666 5667 min_key.objectid = dir_elem->ino; 5668 min_key.type = BTRFS_DIR_ITEM_KEY; 5669 min_key.offset = 0; 5670 again: 5671 btrfs_release_path(path); 5672 ret = btrfs_search_forward(log, &min_key, path, trans->transid); 5673 if (ret < 0) { 5674 goto next_dir_inode; 5675 } else if (ret > 0) { 5676 ret = 0; 5677 goto next_dir_inode; 5678 } 5679 5680 process_leaf: 5681 leaf = path->nodes[0]; 5682 nritems = btrfs_header_nritems(leaf); 5683 for (i = path->slots[0]; i < nritems; i++) { 5684 struct btrfs_dir_item *di; 5685 struct btrfs_key di_key; 5686 struct inode *di_inode; 5687 struct btrfs_dir_list *new_dir_elem; 5688 int log_mode = LOG_INODE_EXISTS; 5689 int type; 5690 5691 btrfs_item_key_to_cpu(leaf, &min_key, i); 5692 if (min_key.objectid != dir_elem->ino || 5693 min_key.type != BTRFS_DIR_ITEM_KEY) 5694 goto next_dir_inode; 5695 5696 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); 5697 type = btrfs_dir_type(leaf, di); 5698 if (btrfs_dir_transid(leaf, di) < trans->transid && 5699 type != BTRFS_FT_DIR) 5700 continue; 5701 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5702 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5703 continue; 5704 5705 btrfs_release_path(path); 5706 di_inode = btrfs_iget(fs_info->sb, &di_key, root); 5707 if (IS_ERR(di_inode)) { 5708 ret = PTR_ERR(di_inode); 5709 goto next_dir_inode; 5710 } 5711 5712 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { 5713 btrfs_add_delayed_iput(di_inode); 5714 break; 5715 } 5716 5717 ctx->log_new_dentries = false; 5718 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 5719 log_mode = LOG_INODE_ALL; 5720 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), 5721 log_mode, 0, LLONG_MAX, ctx); 5722 if (!ret && 5723 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) 5724 ret = 1; 5725 btrfs_add_delayed_iput(di_inode); 5726 if (ret) 5727 goto next_dir_inode; 5728 if (ctx->log_new_dentries) { 5729 new_dir_elem = kmalloc(sizeof(*new_dir_elem), 5730 GFP_NOFS); 5731 if (!new_dir_elem) { 5732 ret = -ENOMEM; 5733 goto next_dir_inode; 5734 } 5735 new_dir_elem->ino = di_key.objectid; 5736 list_add_tail(&new_dir_elem->list, &dir_list); 5737 } 5738 break; 5739 } 5740 
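/* We scanned the whole leaf without breaking out early, so move on to the next leaf of the log tree. */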
if (i == nritems) { 5741 ret = btrfs_next_leaf(log, path); 5742 if (ret < 0) { 5743 goto next_dir_inode; 5744 } else if (ret > 0) { 5745 ret = 0; 5746 goto next_dir_inode; 5747 } 5748 goto process_leaf; 5749 } 5750 if (min_key.offset < (u64)-1) { 5751 min_key.offset++; 5752 goto again; 5753 } 5754 next_dir_inode: 5755 list_del(&dir_elem->list); 5756 kfree(dir_elem); 5757 } 5758 5759 btrfs_free_path(path); 5760 return ret; 5761 } 5762 5763 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, 5764 struct btrfs_inode *inode, 5765 struct btrfs_log_ctx *ctx) 5766 { 5767 struct btrfs_fs_info *fs_info = trans->fs_info; 5768 int ret; 5769 struct btrfs_path *path; 5770 struct btrfs_key key; 5771 struct btrfs_root *root = inode->root; 5772 const u64 ino = btrfs_ino(inode); 5773 5774 path = btrfs_alloc_path(); 5775 if (!path) 5776 return -ENOMEM; 5777 path->skip_locking = 1; 5778 path->search_commit_root = 1; 5779 5780 key.objectid = ino; 5781 key.type = BTRFS_INODE_REF_KEY; 5782 key.offset = 0; 5783 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5784 if (ret < 0) 5785 goto out; 5786 5787 while (true) { 5788 struct extent_buffer *leaf = path->nodes[0]; 5789 int slot = path->slots[0]; 5790 u32 cur_offset = 0; 5791 u32 item_size; 5792 unsigned long ptr; 5793 5794 if (slot >= btrfs_header_nritems(leaf)) { 5795 ret = btrfs_next_leaf(root, path); 5796 if (ret < 0) 5797 goto out; 5798 else if (ret > 0) 5799 break; 5800 continue; 5801 } 5802 5803 btrfs_item_key_to_cpu(leaf, &key, slot); 5804 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 5805 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 5806 break; 5807 5808 item_size = btrfs_item_size_nr(leaf, slot); 5809 ptr = btrfs_item_ptr_offset(leaf, slot); 5810 while (cur_offset < item_size) { 5811 struct btrfs_key inode_key; 5812 struct inode *dir_inode; 5813 5814 inode_key.type = BTRFS_INODE_ITEM_KEY; 5815 inode_key.offset = 0; 5816 5817 if (key.type == BTRFS_INODE_EXTREF_KEY) { 5818 struct btrfs_inode_extref *extref; 5819 5820 extref = (struct btrfs_inode_extref *) 5821 (ptr + cur_offset); 5822 inode_key.objectid = btrfs_inode_extref_parent( 5823 leaf, extref); 5824 cur_offset += sizeof(*extref); 5825 cur_offset += btrfs_inode_extref_name_len(leaf, 5826 extref); 5827 } else { 5828 inode_key.objectid = key.offset; 5829 cur_offset = item_size; 5830 } 5831 5832 dir_inode = btrfs_iget(fs_info->sb, &inode_key, root); 5833 /* 5834 * If the parent inode was deleted, return an error to 5835 * fallback to a transaction commit. This is to prevent 5836 * getting an inode that was moved from one parent A to 5837 * a parent B, got its former parent A deleted and then 5838 * it got fsync'ed, from existing at both parents after 5839 * a log replay (and the old parent still existing). 5840 * Example: 5841 * 5842 * mkdir /mnt/A 5843 * mkdir /mnt/B 5844 * touch /mnt/B/bar 5845 * sync 5846 * mv /mnt/B/bar /mnt/A/bar 5847 * mv -T /mnt/A /mnt/B 5848 * fsync /mnt/B/bar 5849 * <power fail> 5850 * 5851 * If we ignore the old parent B which got deleted, 5852 * after a log replay we would have file bar linked 5853 * at both parents and the old parent B would still 5854 * exist. 
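* Note that parent directories are found through the commit root (search_commit_root is set on the path above), so we also visit directories that stopped being parents of the inode during the current transaction, like directory B in this example.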
5855 */ 5856 if (IS_ERR(dir_inode)) { 5857 ret = PTR_ERR(dir_inode); 5858 goto out; 5859 } 5860 5861 if (ctx) 5862 ctx->log_new_dentries = false; 5863 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode), 5864 LOG_INODE_ALL, 0, LLONG_MAX, ctx); 5865 if (!ret && 5866 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode))) 5867 ret = 1; 5868 if (!ret && ctx && ctx->log_new_dentries) 5869 ret = log_new_dir_dentries(trans, root, 5870 BTRFS_I(dir_inode), ctx); 5871 btrfs_add_delayed_iput(dir_inode); 5872 if (ret) 5873 goto out; 5874 } 5875 path->slots[0]++; 5876 } 5877 ret = 0; 5878 out: 5879 btrfs_free_path(path); 5880 return ret; 5881 } 5882 5883 static int log_new_ancestors(struct btrfs_trans_handle *trans, 5884 struct btrfs_root *root, 5885 struct btrfs_path *path, 5886 struct btrfs_log_ctx *ctx) 5887 { 5888 struct btrfs_key found_key; 5889 5890 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 5891 5892 while (true) { 5893 struct btrfs_fs_info *fs_info = root->fs_info; 5894 const u64 last_committed = fs_info->last_trans_committed; 5895 struct extent_buffer *leaf = path->nodes[0]; 5896 int slot = path->slots[0]; 5897 struct btrfs_key search_key; 5898 struct inode *inode; 5899 int ret = 0; 5900 5901 btrfs_release_path(path); 5902 5903 search_key.objectid = found_key.offset; 5904 search_key.type = BTRFS_INODE_ITEM_KEY; 5905 search_key.offset = 0; 5906 inode = btrfs_iget(fs_info->sb, &search_key, root); 5907 if (IS_ERR(inode)) 5908 return PTR_ERR(inode); 5909 5910 if (BTRFS_I(inode)->generation > last_committed) 5911 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5912 LOG_INODE_EXISTS, 5913 0, LLONG_MAX, ctx); 5914 btrfs_add_delayed_iput(inode); 5915 if (ret) 5916 return ret; 5917 5918 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID) 5919 break; 5920 5921 search_key.type = BTRFS_INODE_REF_KEY; 5922 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 5923 if (ret < 0) 5924 return ret; 5925 5926 leaf = path->nodes[0]; 5927 slot = path->slots[0]; 5928 if (slot >= btrfs_header_nritems(leaf)) { 5929 ret = btrfs_next_leaf(root, path); 5930 if (ret < 0) 5931 return ret; 5932 else if (ret > 0) 5933 return -ENOENT; 5934 leaf = path->nodes[0]; 5935 slot = path->slots[0]; 5936 } 5937 5938 btrfs_item_key_to_cpu(leaf, &found_key, slot); 5939 if (found_key.objectid != search_key.objectid || 5940 found_key.type != BTRFS_INODE_REF_KEY) 5941 return -ENOENT; 5942 } 5943 return 0; 5944 } 5945 5946 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans, 5947 struct btrfs_inode *inode, 5948 struct dentry *parent, 5949 struct btrfs_log_ctx *ctx) 5950 { 5951 struct btrfs_root *root = inode->root; 5952 struct btrfs_fs_info *fs_info = root->fs_info; 5953 struct dentry *old_parent = NULL; 5954 struct super_block *sb = inode->vfs_inode.i_sb; 5955 int ret = 0; 5956 5957 while (true) { 5958 if (!parent || d_really_is_negative(parent) || 5959 sb != parent->d_sb) 5960 break; 5961 5962 inode = BTRFS_I(d_inode(parent)); 5963 if (root != inode->root) 5964 break; 5965 5966 if (inode->generation > fs_info->last_trans_committed) { 5967 ret = btrfs_log_inode(trans, root, inode, 5968 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx); 5969 if (ret) 5970 break; 5971 } 5972 if (IS_ROOT(parent)) 5973 break; 5974 5975 parent = dget_parent(parent); 5976 dput(old_parent); 5977 old_parent = parent; 5978 } 5979 dput(old_parent); 5980 5981 return ret; 5982 } 5983 5984 static int log_all_new_ancestors(struct btrfs_trans_handle *trans, 5985 struct btrfs_inode *inode, 5986 struct dentry *parent, 5987 struct 
static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct dentry *parent,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret;

	/*
	 * For a single hard link case, go through a fast path that does not
	 * need to iterate the fs/subvolume tree.
	 */
	if (inode->vfs_inode.i_nlink < 2)
		return log_new_ancestors_fast(trans, inode, parent, ctx);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0)
		path->slots[0]++;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key found_key;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != ino ||
		    found_key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		/*
		 * Don't deal with extended references because they are rare
		 * cases and too complex to deal with (we would need to keep
		 * track of which subitem we are processing for each item in
		 * this loop, etc). So just return some error to fall back to
		 * a transaction commit.
		 */
		if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = -EMLINK;
			goto out;
		}

		/*
		 * Logging ancestors needs to do more searches on the fs/subvol
		 * tree, so it releases the path as needed to avoid deadlocks.
		 * Keep track of the last inode ref key and resume from that key
		 * after logging all new ancestors for the current hard link.
		 */
		memcpy(&search_key, &found_key, sizeof(search_key));

		ret = log_new_ancestors(trans, root, path, ctx);
		if (ret)
			goto out;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

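/*
 * Editor's note (a sketch of assumptions, not text from the original file):
 * the loop above iterates items keyed as
 *
 *	(inode objectid, BTRFS_INODE_REF_KEY, parent directory objectid)
 *
 * one item per parent directory the inode is linked from (an item may hold
 * several names within the same directory). That is why log_new_ancestors()
 * is started from found_key.offset: the offset is the objectid of the parent
 * directory of the current hard link, and the ancestor walk climbs from there
 * towards the subvolume root (BTRFS_FIRST_FREE_OBJECTID).
 */
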
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. A minimal inode and backref
 * only logging is done of any parent directories that are newer than
 * the last committed transaction
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int inode_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct super_block *sb;
	int ret = 0;
	u64 last_committed = fs_info->last_trans_committed;
	bool log_dentries = false;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * The previous transaction commit did not complete, so we need to do
	 * a full commit ourselves.
	 */
	if (fs_info->last_trans_log_full_commit >
	    fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
					 last_committed);
	if (ret)
		goto end_no_trans;

	/*
	 * Skip already logged inodes or inodes corresponding to tmpfiles
	 * (since logging them is pointless, a link count of 0 means they
	 * will never be accessible).
	 */
	if (btrfs_inode_in_log(inode, trans->transid) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if the inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in the old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the inode file with name foo3
	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
	 * and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, inode, ctx);
		if (ret)
			goto end_trans;
	}

	ret = log_all_new_ancestors(trans, inode, parent, ctx);
	if (ret)
		goto end_trans;

	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, inode, ctx);
	else
		ret = 0;
end_trans:
	if (ret < 0) {
		btrfs_set_log_full_commit(trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     start, end, LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}

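/*
 * Editor's sketch (illustrative only, not copied from the original callers;
 * 'trans', 'root', 'dentry', 'start', 'end' and 'ctx' are assumed to be set
 * up by an fsync implementation) of how the return value above is meant to
 * be consumed:
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *	if (ret == 0) {
 *		ret = btrfs_sync_log(trans, root, &ctx);	// log sync only
 *	} else if (ret != BTRFS_NO_LOG_SYNC) {
 *		ret = btrfs_commit_transaction(trans);		// full commit
 *	}
 *
 * BTRFS_NO_LOG_SYNC means nothing needs to be synced at all for this fsync.
 */
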
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
					"Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
					"Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);

			/*
			 * We didn't find the subvol, likely because it was
			 * deleted. This is ok, simply skip this log and go to
			 * the next one.
			 *
			 * We need to exclude the root because we can't have
			 * other log replays overwriting this log as we'll read
			 * it back in a few more times. This will keep our
			 * block from being modified, and we'll just bail for
			 * each subsequent pass.
			 */
			if (ret == -ENOENT)
				ret = btrfs_pin_extent_for_log_replay(fs_info,
							log->node->start,
							log->node->len);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);

			if (!ret)
				goto next;
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of fs roots probably has changed in case
			 * some inode items got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * could only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;
next:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step four: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged, any new
	 * names for this file/dir will get recorded
	 */
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and the tree of tree roots, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

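/*
 * Editor's sketch of the scenario btrfs_record_snapshot_destroy() above
 * guards against (an illustrative example, paths made up):
 *
 *	btrfs subvolume snapshot /mnt /mnt/snap
 *	sync
 *	btrfs subvolume delete /mnt/snap
 *	fsync(/mnt)
 *	<power failure>
 *
 * By setting last_unlink_trans on the parent directory, the later fsync of
 * /mnt triggers a full transaction commit instead of writing a log tree,
 * avoiding the replay ordering problem described in the comment above.
 */
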
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
 * true (because it's not used).
 *
 * Return value depends on whether @sync_log is true or false.
 * When true:  returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
 *             otherwise.
 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need
 *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the
 *             log, or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed (without attempting to sync the log).
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
			struct btrfs_inode *inode, struct btrfs_inode *old_dir,
			struct dentry *parent,
			bool sync_log, struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming it
	 * from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
			BTRFS_DONT_NEED_LOG_SYNC;

	if (sync_log) {
		struct btrfs_log_ctx ctx2;

		btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
		ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
					     LOG_INODE_EXISTS, &ctx2);
		if (ret == BTRFS_NO_LOG_SYNC)
			return BTRFS_DONT_NEED_TRANS_COMMIT;
		else if (ret)
			return BTRFS_NEED_TRANS_COMMIT;

		ret = btrfs_sync_log(trans, inode->root, &ctx2);
		if (ret)
			return BTRFS_NEED_TRANS_COMMIT;
		return BTRFS_DONT_NEED_TRANS_COMMIT;
	}

	ASSERT(ctx);
	ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
				     LOG_INODE_EXISTS, ctx);
	if (ret == BTRFS_NO_LOG_SYNC)
		return BTRFS_DONT_NEED_LOG_SYNC;
	else if (ret)
		return BTRFS_NEED_TRANS_COMMIT;

	return BTRFS_NEED_LOG_SYNC;
}
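
/*
 * Editor's sketch of the two calling conventions documented above
 * (illustrative only; the surrounding rename code is assumed, not quoted,
 * and the boolean flags are hypothetical placeholders):
 *
 *	- @sync_log == true, @ctx == NULL:
 *
 *		ret = btrfs_log_new_name(trans, inode, old_dir, parent,
 *					 true, NULL);
 *		commit_transaction = (ret == BTRFS_NEED_TRANS_COMMIT);
 *
 *	- @sync_log == false, @ctx != NULL, the caller acts on it later:
 *
 *		ret = btrfs_log_new_name(trans, inode, old_dir, parent,
 *					 false, &ctx);
 *		if (ret == BTRFS_NEED_LOG_SYNC)
 *			sync_log_later = true;
 *		else if (ret == BTRFS_NEED_TRANS_COMMIT)
 *			commit_transaction_later = true;
 */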