// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2). After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant. With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir. After a crash the rm -rf must
 * be replayed. This must be able to recurse down the entire
 * directory tree. The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking. The first
 * stage (0) is to only pin down the blocks we find;
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
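/*
 * Illustrative note (a sketch of the replay flow, not code from this file):
 * recovery is expected to walk the whole log tree once per stage, in order:
 *
 *	LOG_WALK_PIN_ONLY -> LOG_WALK_REPLAY_INODES ->
 *	LOG_WALK_REPLAY_DIR_INDEX -> LOG_WALK_REPLAY_ALL
 *
 * so that every inode exists before any directory entry or extent that
 * references it is replayed.
 */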
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction. Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree. Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree, and once
 * to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no log transaction in
 * progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}
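/*
 * Illustrative usage sketch (hedged; the callers live outside this file):
 * code that must keep the running log transaction from syncing while it
 * updates related state, e.g. the unlink or rename paths, brackets the work
 * with the pin helpers:
 *
 *	btrfs_pin_log_trans(root);
 *	... modify the subvolume tree, record the unlink/rename ...
 *	btrfs_end_log_trans(root);
 *
 * Every btrfs_pin_log_trans() (and every successfully started or joined log
 * transaction) must be paired with exactly one btrfs_end_log_trans().
 */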
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}


/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree. The stage field tells us which part
 * of the log tree processing we are currently doing. The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done? This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer? This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish? Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree. Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(fs_info, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
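/*
 * Illustrative sketch (assumption: this mirrors how the recovery code drives
 * the walker, it is not a definitive caller): the first replay pass pins the
 * extents the log tree occupies with something like
 *
 *	struct walk_control wc = {
 *		.process_func	= process_one_buffer,
 *		.pin		= 1,
 *		.stage		= LOG_WALK_PIN_ONLY,
 *	};
 *
 * so nothing belonging to the log can be reallocated before the later
 * stages copy its items back into the subvolume.
 */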
/*
 * Item overwrite used by replay and tree logging. eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten. If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(fs_info, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(fs_info, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero. This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}
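/*
 * Worked example for the extent_end math in replay_one_extent() below
 * (hypothetical numbers): a REG or PREALLOC item at file offset 0 with
 * num_bytes 8192 gives extent_end = 8192, while an inline extent with
 * ram_bytes 100 on a 4K sector size fs gives
 * extent_end = ALIGN(0 + 100, 4096) = 4096.
 */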
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'. path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet. So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inode's nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file. This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modify the extent tree all by ourselves.
		 * So we need to manually record the dirty extent for qgroup,
		 * as the owner of the file extent changed from the log tree
		 * (doesn't affect qgroup) to the fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree? If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *	extent data disk byte 12845056 nr 102400
			 *	extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *	extent data disk byte 12845056 nr 102400
			 *	extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
							fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
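/*
 * Layout note (informational, matches the iteration in backref_in_log()
 * below): an INODE_REF item packs one or more (struct btrfs_inode_ref, name)
 * pairs back to back inside a single item:
 *
 *	[ btrfs_inode_ref | name bytes | btrfs_inode_ref | name bytes | ... ]
 *
 * which is why the walkers advance with
 *
 *	ptr = (unsigned long)(ref + 1) + name_len;
 */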
/*
 * helper function to check a log tree for a named back reference in
 * an inode. This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log. if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
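/*
 * Usage sketch (hedged; it mirrors the loops later in this file rather than
 * adding a new caller): all the refs packed into one item are walked with
 * these helpers, e.g. for an INODE_REF item at (eb, slot):
 *
 *	ptr = btrfs_item_ptr_offset(eb, slot);
 *	end = ptr + btrfs_item_size_nr(eb, slot);
 *	while (ptr < end) {
 *		ret = ref_get_fields(eb, ptr, &namelen, &name, &index);
 *		...
 *		kfree(name);
 *		ptr += sizeof(struct btrfs_inode_ref) + namelen;
 *	}
 *
 * For extref items the stride is sizeof(struct btrfs_inode_extref) + namelen
 * and extref_get_fields() is used instead.
 */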
/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists). For any name that is not in the inode reference item from the log
 * tree, do a proper unlink of that name (that is, remove its entry from the
 * inode reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
							     parent_id, name,
							     namelen, NULL);
		else
			ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
							 namelen, NULL);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = btrfs_find_name_in_ext_backref(path->nodes[0],
						     path->slots[0], parent_id,
						     name, namelen, NULL);
	else
		ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						 name, namelen, NULL);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function. (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode. If we don't find the dir, just don't
	 * copy the back ref in. The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link. Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = btrfs_add_link(trans, BTRFS_I(dir),
					     BTRFS_I(inode),
					     name, namelen, 0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0],
					    path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay. So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found. If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done. The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist. This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
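/*
 * Key layout note (informational): a directory entry is logged under two key
 * types, both of which replay_one_name() below must handle:
 *
 *	(dirid, BTRFS_DIR_ITEM_KEY, hash(name))	- lookup by name
 *	(dirid, BTRFS_DIR_INDEX_KEY, index)	- readdir order
 *
 * Only the DIR_INDEX variant carries the sequence number needed to insert a
 * missing entry, which is why inserts below are done only for that key type.
 */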
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped. fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
				       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
Only BTRFS_DIR_ITEM_KEY types will have more than 1974 * one name in a directory item, but the same code gets used for 1975 * both directory index types 1976 */ 1977 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, 1978 struct btrfs_root *root, 1979 struct btrfs_path *path, 1980 struct extent_buffer *eb, int slot, 1981 struct btrfs_key *key) 1982 { 1983 int ret = 0; 1984 u32 item_size = btrfs_item_size_nr(eb, slot); 1985 struct btrfs_dir_item *di; 1986 int name_len; 1987 unsigned long ptr; 1988 unsigned long ptr_end; 1989 struct btrfs_path *fixup_path = NULL; 1990 1991 ptr = btrfs_item_ptr_offset(eb, slot); 1992 ptr_end = ptr + item_size; 1993 while (ptr < ptr_end) { 1994 di = (struct btrfs_dir_item *)ptr; 1995 name_len = btrfs_dir_name_len(eb, di); 1996 ret = replay_one_name(trans, root, path, eb, di, key); 1997 if (ret < 0) 1998 break; 1999 ptr = (unsigned long)(di + 1); 2000 ptr += name_len; 2001 2002 /* 2003 * If this entry refers to a non-directory (directories can not 2004 * have a link count > 1) and it was added in the transaction 2005 * that was not committed, make sure we fixup the link count of 2006 * the inode the entry points to. Otherwise something like 2007 * the following would result in a directory pointing to an 2008 * inode with a wrong link count that does not account for this dir 2009 * entry: 2010 * 2011 * mkdir testdir 2012 * touch testdir/foo 2013 * touch testdir/bar 2014 * sync 2015 * 2016 * ln testdir/bar testdir/bar_link 2017 * ln testdir/foo testdir/foo_link 2018 * xfs_io -c "fsync" testdir/bar 2019 * 2020 * <power failure> 2021 * 2022 * mount fs, log replay happens 2023 * 2024 * File foo would remain with a link count of 1 when it has two 2025 * entries pointing to it in the directory testdir. This would 2026 * make it impossible to ever delete the parent directory, as 2027 * it would result in stale dentries that can never be deleted. 2028 */ 2029 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) { 2030 struct btrfs_key di_key; 2031 2032 if (!fixup_path) { 2033 fixup_path = btrfs_alloc_path(); 2034 if (!fixup_path) { 2035 ret = -ENOMEM; 2036 break; 2037 } 2038 } 2039 2040 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2041 ret = link_to_fixup_dir(trans, root, fixup_path, 2042 di_key.objectid); 2043 if (ret) 2044 break; 2045 } 2046 ret = 0; 2047 } 2048 btrfs_free_path(fixup_path); 2049 return ret; 2050 } 2051 2052 /* 2053 * directory replay has two parts. There are the standard directory 2054 * items in the log copied from the subvolume, and range items 2055 * created in the log while the subvolume was logged. 2056 * 2057 * The range items tell us which parts of the key space the log 2058 * is authoritative for. During replay, if a key in the subvolume 2059 * directory is in a logged range item, but not actually in the log, 2060 * that means it was deleted from the directory before the fsync 2061 * and should be removed.
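 *
 * For example (illustrative offsets): if the log holds a range item
 * covering offsets [0, 100] of a directory, and the subvolume copy of
 * that directory has an entry at offset 50 with no matching entry in
 * the log, that entry was deleted before the fsync and replay must
 * remove it. An entry at offset 200 falls outside every logged range,
 * so the log says nothing about it and it is left untouched.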
2062 */ 2063 static noinline int find_dir_range(struct btrfs_root *root, 2064 struct btrfs_path *path, 2065 u64 dirid, int key_type, 2066 u64 *start_ret, u64 *end_ret) 2067 { 2068 struct btrfs_key key; 2069 u64 found_end; 2070 struct btrfs_dir_log_item *item; 2071 int ret; 2072 int nritems; 2073 2074 if (*start_ret == (u64)-1) 2075 return 1; 2076 2077 key.objectid = dirid; 2078 key.type = key_type; 2079 key.offset = *start_ret; 2080 2081 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2082 if (ret < 0) 2083 goto out; 2084 if (ret > 0) { 2085 if (path->slots[0] == 0) 2086 goto out; 2087 path->slots[0]--; 2088 } 2089 if (ret != 0) 2090 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2091 2092 if (key.type != key_type || key.objectid != dirid) { 2093 ret = 1; 2094 goto next; 2095 } 2096 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2097 struct btrfs_dir_log_item); 2098 found_end = btrfs_dir_log_end(path->nodes[0], item); 2099 2100 if (*start_ret >= key.offset && *start_ret <= found_end) { 2101 ret = 0; 2102 *start_ret = key.offset; 2103 *end_ret = found_end; 2104 goto out; 2105 } 2106 ret = 1; 2107 next: 2108 /* check the next slot in the tree to see if it is a valid item */ 2109 nritems = btrfs_header_nritems(path->nodes[0]); 2110 path->slots[0]++; 2111 if (path->slots[0] >= nritems) { 2112 ret = btrfs_next_leaf(root, path); 2113 if (ret) 2114 goto out; 2115 } 2116 2117 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2118 2119 if (key.type != key_type || key.objectid != dirid) { 2120 ret = 1; 2121 goto out; 2122 } 2123 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2124 struct btrfs_dir_log_item); 2125 found_end = btrfs_dir_log_end(path->nodes[0], item); 2126 *start_ret = key.offset; 2127 *end_ret = found_end; 2128 ret = 0; 2129 out: 2130 btrfs_release_path(path); 2131 return ret; 2132 } 2133 2134 /* 2135 * this looks for a given directory item in the log. 
If the directory 2136 * item is not in the log, the item is removed and the inode it points 2137 * to is unlinked 2138 */ 2139 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 2140 struct btrfs_root *root, 2141 struct btrfs_root *log, 2142 struct btrfs_path *path, 2143 struct btrfs_path *log_path, 2144 struct inode *dir, 2145 struct btrfs_key *dir_key) 2146 { 2147 int ret; 2148 struct extent_buffer *eb; 2149 int slot; 2150 u32 item_size; 2151 struct btrfs_dir_item *di; 2152 struct btrfs_dir_item *log_di; 2153 int name_len; 2154 unsigned long ptr; 2155 unsigned long ptr_end; 2156 char *name; 2157 struct inode *inode; 2158 struct btrfs_key location; 2159 2160 again: 2161 eb = path->nodes[0]; 2162 slot = path->slots[0]; 2163 item_size = btrfs_item_size_nr(eb, slot); 2164 ptr = btrfs_item_ptr_offset(eb, slot); 2165 ptr_end = ptr + item_size; 2166 while (ptr < ptr_end) { 2167 di = (struct btrfs_dir_item *)ptr; 2168 name_len = btrfs_dir_name_len(eb, di); 2169 name = kmalloc(name_len, GFP_NOFS); 2170 if (!name) { 2171 ret = -ENOMEM; 2172 goto out; 2173 } 2174 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2175 name_len); 2176 log_di = NULL; 2177 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2178 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2179 dir_key->objectid, 2180 name, name_len, 0); 2181 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2182 log_di = btrfs_lookup_dir_index_item(trans, log, 2183 log_path, 2184 dir_key->objectid, 2185 dir_key->offset, 2186 name, name_len, 0); 2187 } 2188 if (!log_di || log_di == ERR_PTR(-ENOENT)) { 2189 btrfs_dir_item_key_to_cpu(eb, di, &location); 2190 btrfs_release_path(path); 2191 btrfs_release_path(log_path); 2192 inode = read_one_inode(root, location.objectid); 2193 if (!inode) { 2194 kfree(name); 2195 return -EIO; 2196 } 2197 2198 ret = link_to_fixup_dir(trans, root, 2199 path, location.objectid); 2200 if (ret) { 2201 kfree(name); 2202 iput(inode); 2203 goto out; 2204 } 2205 2206 inc_nlink(inode); 2207 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 2208 BTRFS_I(inode), name, name_len); 2209 if (!ret) 2210 ret = btrfs_run_delayed_items(trans); 2211 kfree(name); 2212 iput(inode); 2213 if (ret) 2214 goto out; 2215 2216 /* there might still be more names under this key 2217 * check and repeat if required 2218 */ 2219 ret = btrfs_search_slot(NULL, root, dir_key, path, 2220 0, 0); 2221 if (ret == 0) 2222 goto again; 2223 ret = 0; 2224 goto out; 2225 } else if (IS_ERR(log_di)) { 2226 kfree(name); 2227 return PTR_ERR(log_di); 2228 } 2229 btrfs_release_path(log_path); 2230 kfree(name); 2231 2232 ptr = (unsigned long)(di + 1); 2233 ptr += name_len; 2234 } 2235 ret = 0; 2236 out: 2237 btrfs_release_path(path); 2238 btrfs_release_path(log_path); 2239 return ret; 2240 } 2241 2242 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2243 struct btrfs_root *root, 2244 struct btrfs_root *log, 2245 struct btrfs_path *path, 2246 const u64 ino) 2247 { 2248 struct btrfs_key search_key; 2249 struct btrfs_path *log_path; 2250 int i; 2251 int nritems; 2252 int ret; 2253 2254 log_path = btrfs_alloc_path(); 2255 if (!log_path) 2256 return -ENOMEM; 2257 2258 search_key.objectid = ino; 2259 search_key.type = BTRFS_XATTR_ITEM_KEY; 2260 search_key.offset = 0; 2261 again: 2262 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2263 if (ret < 0) 2264 goto out; 2265 process_leaf: 2266 nritems = btrfs_header_nritems(path->nodes[0]); 2267 for (i = path->slots[0]; i < nritems; i++) { 2268 struct btrfs_key key; 
2269 struct btrfs_dir_item *di; 2270 struct btrfs_dir_item *log_di; 2271 u32 total_size; 2272 u32 cur; 2273 2274 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2275 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2276 ret = 0; 2277 goto out; 2278 } 2279 2280 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2281 total_size = btrfs_item_size_nr(path->nodes[0], i); 2282 cur = 0; 2283 while (cur < total_size) { 2284 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2285 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2286 u32 this_len = sizeof(*di) + name_len + data_len; 2287 char *name; 2288 2289 name = kmalloc(name_len, GFP_NOFS); 2290 if (!name) { 2291 ret = -ENOMEM; 2292 goto out; 2293 } 2294 read_extent_buffer(path->nodes[0], name, 2295 (unsigned long)(di + 1), name_len); 2296 2297 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2298 name, name_len, 0); 2299 btrfs_release_path(log_path); 2300 if (!log_di) { 2301 /* Doesn't exist in log tree, so delete it. */ 2302 btrfs_release_path(path); 2303 di = btrfs_lookup_xattr(trans, root, path, ino, 2304 name, name_len, -1); 2305 kfree(name); 2306 if (IS_ERR(di)) { 2307 ret = PTR_ERR(di); 2308 goto out; 2309 } 2310 ASSERT(di); 2311 ret = btrfs_delete_one_dir_name(trans, root, 2312 path, di); 2313 if (ret) 2314 goto out; 2315 btrfs_release_path(path); 2316 search_key = key; 2317 goto again; 2318 } 2319 kfree(name); 2320 if (IS_ERR(log_di)) { 2321 ret = PTR_ERR(log_di); 2322 goto out; 2323 } 2324 cur += this_len; 2325 di = (struct btrfs_dir_item *)((char *)di + this_len); 2326 } 2327 } 2328 ret = btrfs_next_leaf(root, path); 2329 if (ret > 0) 2330 ret = 0; 2331 else if (ret == 0) 2332 goto process_leaf; 2333 out: 2334 btrfs_free_path(log_path); 2335 btrfs_release_path(path); 2336 return ret; 2337 } 2338 2339 2340 /* 2341 * deletion replay happens before we copy any new directory items 2342 * out of the log or out of backreferences from inodes. It 2343 * scans the log to find ranges of keys that the log is authoritative for, 2344 * and then scans the directory to find items in those ranges that are 2345 * not present in the log. 2346 * 2347 * Anything we don't find in the log is unlinked and removed from the 2348 * directory.
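 *
 * A sketch of the case this handles (illustrative command sequence):
 *
 * touch dir/foo
 * touch dir/bar
 * sync
 * rm dir/bar
 * xfs_io -c "fsync" dir
 * <power failure>
 *
 * The committed subvolume still has both names, but the log is
 * authoritative for the range covering bar's key and has no entry for
 * it, so replay unlinks bar and removes its dentry from dir.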
2349 */ 2350 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2351 struct btrfs_root *root, 2352 struct btrfs_root *log, 2353 struct btrfs_path *path, 2354 u64 dirid, int del_all) 2355 { 2356 u64 range_start; 2357 u64 range_end; 2358 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2359 int ret = 0; 2360 struct btrfs_key dir_key; 2361 struct btrfs_key found_key; 2362 struct btrfs_path *log_path; 2363 struct inode *dir; 2364 2365 dir_key.objectid = dirid; 2366 dir_key.type = BTRFS_DIR_ITEM_KEY; 2367 log_path = btrfs_alloc_path(); 2368 if (!log_path) 2369 return -ENOMEM; 2370 2371 dir = read_one_inode(root, dirid); 2372 /* it isn't an error if the inode isn't there, that can happen 2373 * because we replay the deletes before we copy in the inode item 2374 * from the log 2375 */ 2376 if (!dir) { 2377 btrfs_free_path(log_path); 2378 return 0; 2379 } 2380 again: 2381 range_start = 0; 2382 range_end = 0; 2383 while (1) { 2384 if (del_all) 2385 range_end = (u64)-1; 2386 else { 2387 ret = find_dir_range(log, path, dirid, key_type, 2388 &range_start, &range_end); 2389 if (ret != 0) 2390 break; 2391 } 2392 2393 dir_key.offset = range_start; 2394 while (1) { 2395 int nritems; 2396 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2397 0, 0); 2398 if (ret < 0) 2399 goto out; 2400 2401 nritems = btrfs_header_nritems(path->nodes[0]); 2402 if (path->slots[0] >= nritems) { 2403 ret = btrfs_next_leaf(root, path); 2404 if (ret == 1) 2405 break; 2406 else if (ret < 0) 2407 goto out; 2408 } 2409 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2410 path->slots[0]); 2411 if (found_key.objectid != dirid || 2412 found_key.type != dir_key.type) 2413 goto next_type; 2414 2415 if (found_key.offset > range_end) 2416 break; 2417 2418 ret = check_item_in_log(trans, root, log, path, 2419 log_path, dir, 2420 &found_key); 2421 if (ret) 2422 goto out; 2423 if (found_key.offset == (u64)-1) 2424 break; 2425 dir_key.offset = found_key.offset + 1; 2426 } 2427 btrfs_release_path(path); 2428 if (range_end == (u64)-1) 2429 break; 2430 range_start = range_end + 1; 2431 } 2432 2433 next_type: 2434 ret = 0; 2435 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2436 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2437 dir_key.type = BTRFS_DIR_INDEX_KEY; 2438 btrfs_release_path(path); 2439 goto again; 2440 } 2441 out: 2442 btrfs_release_path(path); 2443 btrfs_free_path(log_path); 2444 iput(dir); 2445 return ret; 2446 } 2447 2448 /* 2449 * the process_func used to replay items from the log tree. This 2450 * gets called in two different stages. The first stage just looks 2451 * for inodes and makes sure they are all copied into the subvolume. 2452 * 2453 * The second stage copies all the other item types from the log into 2454 * the subvolume. The two stage approach is slower, but gets rid of 2455 * lots of complexity around inodes referencing other inodes that exist 2456 * only in the log (references come from either directory items or inode 2457 * back refs). 
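 *
 * For example, the log may hold an inode back ref for file A naming a
 * parent directory B that exists only in the log. Creating every
 * logged inode first guarantees that B is present in the subvolume by
 * the time a later stage replays the directory items and back refs
 * that point at it.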
2458 */ 2459 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2460 struct walk_control *wc, u64 gen, int level) 2461 { 2462 int nritems; 2463 struct btrfs_path *path; 2464 struct btrfs_root *root = wc->replay_dest; 2465 struct btrfs_key key; 2466 int i; 2467 int ret; 2468 2469 ret = btrfs_read_buffer(eb, gen, level, NULL); 2470 if (ret) 2471 return ret; 2472 2473 level = btrfs_header_level(eb); 2474 2475 if (level != 0) 2476 return 0; 2477 2478 path = btrfs_alloc_path(); 2479 if (!path) 2480 return -ENOMEM; 2481 2482 nritems = btrfs_header_nritems(eb); 2483 for (i = 0; i < nritems; i++) { 2484 btrfs_item_key_to_cpu(eb, &key, i); 2485 2486 /* inode keys are done during the first stage */ 2487 if (key.type == BTRFS_INODE_ITEM_KEY && 2488 wc->stage == LOG_WALK_REPLAY_INODES) { 2489 struct btrfs_inode_item *inode_item; 2490 u32 mode; 2491 2492 inode_item = btrfs_item_ptr(eb, i, 2493 struct btrfs_inode_item); 2494 /* 2495 * If we have a tmpfile (O_TMPFILE) that got fsync'ed 2496 * and never got linked before the fsync, skip it, as 2497 * replaying it is pointless since it would be deleted 2498 * later. We skip logging tmpfiles, but it's always 2499 * possible we are replaying a log created with a kernel 2500 * that used to log tmpfiles. 2501 */ 2502 if (btrfs_inode_nlink(eb, inode_item) == 0) { 2503 wc->ignore_cur_inode = true; 2504 continue; 2505 } else { 2506 wc->ignore_cur_inode = false; 2507 } 2508 ret = replay_xattr_deletes(wc->trans, root, log, 2509 path, key.objectid); 2510 if (ret) 2511 break; 2512 mode = btrfs_inode_mode(eb, inode_item); 2513 if (S_ISDIR(mode)) { 2514 ret = replay_dir_deletes(wc->trans, 2515 root, log, path, key.objectid, 0); 2516 if (ret) 2517 break; 2518 } 2519 ret = overwrite_item(wc->trans, root, path, 2520 eb, i, &key); 2521 if (ret) 2522 break; 2523 2524 /* 2525 * Before replaying extents, truncate the inode to its 2526 * size. We need to do it now and not after log replay 2527 * because before an fsync we can have prealloc extents 2528 * added beyond the inode's i_size. If we did it after, 2529 * through orphan cleanup for example, we would drop 2530 * those prealloc extents just after replaying them. 2531 */ 2532 if (S_ISREG(mode)) { 2533 struct inode *inode; 2534 u64 from; 2535 2536 inode = read_one_inode(root, key.objectid); 2537 if (!inode) { 2538 ret = -EIO; 2539 break; 2540 } 2541 from = ALIGN(i_size_read(inode), 2542 root->fs_info->sectorsize); 2543 ret = btrfs_drop_extents(wc->trans, root, inode, 2544 from, (u64)-1, 1); 2545 if (!ret) { 2546 /* Update the inode's nbytes. 
*/ 2547 ret = btrfs_update_inode(wc->trans, 2548 root, inode); 2549 } 2550 iput(inode); 2551 if (ret) 2552 break; 2553 } 2554 2555 ret = link_to_fixup_dir(wc->trans, root, 2556 path, key.objectid); 2557 if (ret) 2558 break; 2559 } 2560 2561 if (wc->ignore_cur_inode) 2562 continue; 2563 2564 if (key.type == BTRFS_DIR_INDEX_KEY && 2565 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2566 ret = replay_one_dir_item(wc->trans, root, path, 2567 eb, i, &key); 2568 if (ret) 2569 break; 2570 } 2571 2572 if (wc->stage < LOG_WALK_REPLAY_ALL) 2573 continue; 2574 2575 /* these keys are simply copied */ 2576 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2577 ret = overwrite_item(wc->trans, root, path, 2578 eb, i, &key); 2579 if (ret) 2580 break; 2581 } else if (key.type == BTRFS_INODE_REF_KEY || 2582 key.type == BTRFS_INODE_EXTREF_KEY) { 2583 ret = add_inode_ref(wc->trans, root, log, path, 2584 eb, i, &key); 2585 if (ret && ret != -ENOENT) 2586 break; 2587 ret = 0; 2588 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2589 ret = replay_one_extent(wc->trans, root, path, 2590 eb, i, &key); 2591 if (ret) 2592 break; 2593 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2594 ret = replay_one_dir_item(wc->trans, root, path, 2595 eb, i, &key); 2596 if (ret) 2597 break; 2598 } 2599 } 2600 btrfs_free_path(path); 2601 return ret; 2602 } 2603 2604 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2605 struct btrfs_root *root, 2606 struct btrfs_path *path, int *level, 2607 struct walk_control *wc) 2608 { 2609 struct btrfs_fs_info *fs_info = root->fs_info; 2610 u64 root_owner; 2611 u64 bytenr; 2612 u64 ptr_gen; 2613 struct extent_buffer *next; 2614 struct extent_buffer *cur; 2615 struct extent_buffer *parent; 2616 u32 blocksize; 2617 int ret = 0; 2618 2619 WARN_ON(*level < 0); 2620 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2621 2622 while (*level > 0) { 2623 struct btrfs_key first_key; 2624 2625 WARN_ON(*level < 0); 2626 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2627 cur = path->nodes[*level]; 2628 2629 WARN_ON(btrfs_header_level(cur) != *level); 2630 2631 if (path->slots[*level] >= 2632 btrfs_header_nritems(cur)) 2633 break; 2634 2635 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2636 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2637 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]); 2638 blocksize = fs_info->nodesize; 2639 2640 parent = path->nodes[*level]; 2641 root_owner = btrfs_header_owner(parent); 2642 2643 next = btrfs_find_create_tree_block(fs_info, bytenr); 2644 if (IS_ERR(next)) 2645 return PTR_ERR(next); 2646 2647 if (*level == 1) { 2648 ret = wc->process_func(root, next, wc, ptr_gen, 2649 *level - 1); 2650 if (ret) { 2651 free_extent_buffer(next); 2652 return ret; 2653 } 2654 2655 path->slots[*level]++; 2656 if (wc->free) { 2657 ret = btrfs_read_buffer(next, ptr_gen, 2658 *level - 1, &first_key); 2659 if (ret) { 2660 free_extent_buffer(next); 2661 return ret; 2662 } 2663 2664 if (trans) { 2665 btrfs_tree_lock(next); 2666 btrfs_set_lock_blocking(next); 2667 clean_tree_block(fs_info, next); 2668 btrfs_wait_tree_block_writeback(next); 2669 btrfs_tree_unlock(next); 2670 } else { 2671 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2672 clear_extent_buffer_dirty(next); 2673 } 2674 2675 WARN_ON(root_owner != 2676 BTRFS_TREE_LOG_OBJECTID); 2677 ret = btrfs_free_and_pin_reserved_extent( 2678 fs_info, bytenr, 2679 blocksize); 2680 if (ret) { 2681 free_extent_buffer(next); 2682 return ret; 2683 } 2684 } 2685 free_extent_buffer(next); 2686 continue; 2687 } 2688 ret = 
btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key); 2689 if (ret) { 2690 free_extent_buffer(next); 2691 return ret; 2692 } 2693 2694 WARN_ON(*level <= 0); 2695 if (path->nodes[*level-1]) 2696 free_extent_buffer(path->nodes[*level-1]); 2697 path->nodes[*level-1] = next; 2698 *level = btrfs_header_level(next); 2699 path->slots[*level] = 0; 2700 cond_resched(); 2701 } 2702 WARN_ON(*level < 0); 2703 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2704 2705 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2706 2707 cond_resched(); 2708 return 0; 2709 } 2710 2711 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2712 struct btrfs_root *root, 2713 struct btrfs_path *path, int *level, 2714 struct walk_control *wc) 2715 { 2716 struct btrfs_fs_info *fs_info = root->fs_info; 2717 u64 root_owner; 2718 int i; 2719 int slot; 2720 int ret; 2721 2722 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2723 slot = path->slots[i]; 2724 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2725 path->slots[i]++; 2726 *level = i; 2727 WARN_ON(*level == 0); 2728 return 0; 2729 } else { 2730 struct extent_buffer *parent; 2731 if (path->nodes[*level] == root->node) 2732 parent = path->nodes[*level]; 2733 else 2734 parent = path->nodes[*level + 1]; 2735 2736 root_owner = btrfs_header_owner(parent); 2737 ret = wc->process_func(root, path->nodes[*level], wc, 2738 btrfs_header_generation(path->nodes[*level]), 2739 *level); 2740 if (ret) 2741 return ret; 2742 2743 if (wc->free) { 2744 struct extent_buffer *next; 2745 2746 next = path->nodes[*level]; 2747 2748 if (trans) { 2749 btrfs_tree_lock(next); 2750 btrfs_set_lock_blocking(next); 2751 clean_tree_block(fs_info, next); 2752 btrfs_wait_tree_block_writeback(next); 2753 btrfs_tree_unlock(next); 2754 } else { 2755 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2756 clear_extent_buffer_dirty(next); 2757 } 2758 2759 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2760 ret = btrfs_free_and_pin_reserved_extent( 2761 fs_info, 2762 path->nodes[*level]->start, 2763 path->nodes[*level]->len); 2764 if (ret) 2765 return ret; 2766 } 2767 free_extent_buffer(path->nodes[*level]); 2768 path->nodes[*level] = NULL; 2769 *level = i + 1; 2770 } 2771 } 2772 return 1; 2773 } 2774 2775 /* 2776 * drop the reference count on the tree rooted at 'snap'. This traverses 2777 * the tree freeing any blocks that have a ref count of zero after being 2778 * decremented. 2779 */ 2780 static int walk_log_tree(struct btrfs_trans_handle *trans, 2781 struct btrfs_root *log, struct walk_control *wc) 2782 { 2783 struct btrfs_fs_info *fs_info = log->fs_info; 2784 int ret = 0; 2785 int wret; 2786 int level; 2787 struct btrfs_path *path; 2788 int orig_level; 2789 2790 path = btrfs_alloc_path(); 2791 if (!path) 2792 return -ENOMEM; 2793 2794 level = btrfs_header_level(log->node); 2795 orig_level = level; 2796 path->nodes[level] = log->node; 2797 extent_buffer_get(log->node); 2798 path->slots[level] = 0; 2799 2800 while (1) { 2801 wret = walk_down_log_tree(trans, log, path, &level, wc); 2802 if (wret > 0) 2803 break; 2804 if (wret < 0) { 2805 ret = wret; 2806 goto out; 2807 } 2808 2809 wret = walk_up_log_tree(trans, log, path, &level, wc); 2810 if (wret > 0) 2811 break; 2812 if (wret < 0) { 2813 ret = wret; 2814 goto out; 2815 } 2816 } 2817 2818 /* was the root node processed? 
if not, catch it here */ 2819 if (path->nodes[orig_level]) { 2820 ret = wc->process_func(log, path->nodes[orig_level], wc, 2821 btrfs_header_generation(path->nodes[orig_level]), 2822 orig_level); 2823 if (ret) 2824 goto out; 2825 if (wc->free) { 2826 struct extent_buffer *next; 2827 2828 next = path->nodes[orig_level]; 2829 2830 if (trans) { 2831 btrfs_tree_lock(next); 2832 btrfs_set_lock_blocking(next); 2833 clean_tree_block(fs_info, next); 2834 btrfs_wait_tree_block_writeback(next); 2835 btrfs_tree_unlock(next); 2836 } else { 2837 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2838 clear_extent_buffer_dirty(next); 2839 } 2840 2841 WARN_ON(log->root_key.objectid != 2842 BTRFS_TREE_LOG_OBJECTID); 2843 ret = btrfs_free_and_pin_reserved_extent(fs_info, 2844 next->start, next->len); 2845 if (ret) 2846 goto out; 2847 } 2848 } 2849 2850 out: 2851 btrfs_free_path(path); 2852 return ret; 2853 } 2854 2855 /* 2856 * helper function to update the item for a given subvolume's log root 2857 * in the tree of log roots 2858 */ 2859 static int update_log_root(struct btrfs_trans_handle *trans, 2860 struct btrfs_root *log) 2861 { 2862 struct btrfs_fs_info *fs_info = log->fs_info; 2863 int ret; 2864 2865 if (log->log_transid == 1) { 2866 /* insert root item on the first sync */ 2867 ret = btrfs_insert_root(trans, fs_info->log_root_tree, 2868 &log->root_key, &log->root_item); 2869 } else { 2870 ret = btrfs_update_root(trans, fs_info->log_root_tree, 2871 &log->root_key, &log->root_item); 2872 } 2873 return ret; 2874 } 2875 2876 static void wait_log_commit(struct btrfs_root *root, int transid) 2877 { 2878 DEFINE_WAIT(wait); 2879 int index = transid % 2; 2880 2881 /* 2882 * we only allow two pending log transactions at a time, 2883 * so we know that if ours is more than 2 older than the 2884 * current transaction, we're done 2885 */ 2886 for (;;) { 2887 prepare_to_wait(&root->log_commit_wait[index], 2888 &wait, TASK_UNINTERRUPTIBLE); 2889 2890 if (!(root->log_transid_committed < transid && 2891 atomic_read(&root->log_commit[index]))) 2892 break; 2893 2894 mutex_unlock(&root->log_mutex); 2895 schedule(); 2896 mutex_lock(&root->log_mutex); 2897 } 2898 finish_wait(&root->log_commit_wait[index], &wait); 2899 } 2900 2901 static void wait_for_writer(struct btrfs_root *root) 2902 { 2903 DEFINE_WAIT(wait); 2904 2905 for (;;) { 2906 prepare_to_wait(&root->log_writer_wait, &wait, 2907 TASK_UNINTERRUPTIBLE); 2908 if (!atomic_read(&root->log_writers)) 2909 break; 2910 2911 mutex_unlock(&root->log_mutex); 2912 schedule(); 2913 mutex_lock(&root->log_mutex); 2914 } 2915 finish_wait(&root->log_writer_wait, &wait); 2916 } 2917 2918 static inline void btrfs_remove_log_ctx(struct btrfs_root *root, 2919 struct btrfs_log_ctx *ctx) 2920 { 2921 if (!ctx) 2922 return; 2923 2924 mutex_lock(&root->log_mutex); 2925 list_del_init(&ctx->list); 2926 mutex_unlock(&root->log_mutex); 2927 } 2928 2929 /* 2930 * Invoked in log mutex context, or the caller must ensure no other task 2931 * can access the list. 2932 */ 2933 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, 2934 int index, int error) 2935 { 2936 struct btrfs_log_ctx *ctx; 2937 struct btrfs_log_ctx *safe; 2938 2939 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { 2940 list_del_init(&ctx->list); 2941 ctx->log_ret = error; 2942 } 2943 2944 INIT_LIST_HEAD(&root->log_ctxs[index]); 2945 } 2946 2947 /* 2948 * btrfs_sync_log sends a given tree log down to the disk and 2949 * updates the super blocks to record it.
When this call is done, 2950 * you know that any inodes previously logged are safely on disk only 2951 * if it returns 0. 2952 * 2953 * Any other return value means you need to call btrfs_commit_transaction. 2954 * Some of the edge cases for fsyncing directories that have had unlinks 2955 * or renames done in the past mean that sometimes the only safe 2956 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 2957 * that has happened. 2958 */ 2959 int btrfs_sync_log(struct btrfs_trans_handle *trans, 2960 struct btrfs_root *root, struct btrfs_log_ctx *ctx) 2961 { 2962 int index1; 2963 int index2; 2964 int mark; 2965 int ret; 2966 struct btrfs_fs_info *fs_info = root->fs_info; 2967 struct btrfs_root *log = root->log_root; 2968 struct btrfs_root *log_root_tree = fs_info->log_root_tree; 2969 int log_transid = 0; 2970 struct btrfs_log_ctx root_log_ctx; 2971 struct blk_plug plug; 2972 2973 mutex_lock(&root->log_mutex); 2974 log_transid = ctx->log_transid; 2975 if (root->log_transid_committed >= log_transid) { 2976 mutex_unlock(&root->log_mutex); 2977 return ctx->log_ret; 2978 } 2979 2980 index1 = log_transid % 2; 2981 if (atomic_read(&root->log_commit[index1])) { 2982 wait_log_commit(root, log_transid); 2983 mutex_unlock(&root->log_mutex); 2984 return ctx->log_ret; 2985 } 2986 ASSERT(log_transid == root->log_transid); 2987 atomic_set(&root->log_commit[index1], 1); 2988 2989 /* wait for previous tree log sync to complete */ 2990 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 2991 wait_log_commit(root, log_transid - 1); 2992 2993 while (1) { 2994 int batch = atomic_read(&root->log_batch); 2995 /* when we're on an ssd, just kick the log commit out */ 2996 if (!btrfs_test_opt(fs_info, SSD) && 2997 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { 2998 mutex_unlock(&root->log_mutex); 2999 schedule_timeout_uninterruptible(1); 3000 mutex_lock(&root->log_mutex); 3001 } 3002 wait_for_writer(root); 3003 if (batch == atomic_read(&root->log_batch)) 3004 break; 3005 } 3006 3007 /* bail out if we need to do a full commit */ 3008 if (btrfs_need_log_full_commit(fs_info, trans)) { 3009 ret = -EAGAIN; 3010 mutex_unlock(&root->log_mutex); 3011 goto out; 3012 } 3013 3014 if (log_transid % 2 == 0) 3015 mark = EXTENT_DIRTY; 3016 else 3017 mark = EXTENT_NEW; 3018 3019 /* we start IO on all the marked extents here, but we don't actually 3020 * wait for them until later. 3021 */ 3022 blk_start_plug(&plug); 3023 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); 3024 if (ret) { 3025 blk_finish_plug(&plug); 3026 btrfs_abort_transaction(trans, ret); 3027 btrfs_set_log_full_commit(fs_info, trans); 3028 mutex_unlock(&root->log_mutex); 3029 goto out; 3030 } 3031 3032 btrfs_set_root_node(&log->root_item, log->node); 3033 3034 root->log_transid++; 3035 log->log_transid = root->log_transid; 3036 root->log_start_pid = 0; 3037 /* 3038 * IO has been started, blocks of the log tree have WRITTEN flag set 3039 * in their headers. new modifications of the log will be written to 3040 * new positions. so it's safe to allow log writers to go in. 
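 *
 * (This is also why the mark chosen above alternates between
 * EXTENT_DIRTY and EXTENT_NEW based on log_transid % 2: up to two log
 * transactions can be in flight at once, and each needs its own set
 * of marked pages to write out and wait on.)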
3041 */ 3042 mutex_unlock(&root->log_mutex); 3043 3044 btrfs_init_log_ctx(&root_log_ctx, NULL); 3045 3046 mutex_lock(&log_root_tree->log_mutex); 3047 atomic_inc(&log_root_tree->log_batch); 3048 atomic_inc(&log_root_tree->log_writers); 3049 3050 index2 = log_root_tree->log_transid % 2; 3051 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3052 root_log_ctx.log_transid = log_root_tree->log_transid; 3053 3054 mutex_unlock(&log_root_tree->log_mutex); 3055 3056 ret = update_log_root(trans, log); 3057 3058 mutex_lock(&log_root_tree->log_mutex); 3059 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 3060 /* atomic_dec_and_test implies a barrier */ 3061 cond_wake_up_nomb(&log_root_tree->log_writer_wait); 3062 } 3063 3064 if (ret) { 3065 if (!list_empty(&root_log_ctx.list)) 3066 list_del_init(&root_log_ctx.list); 3067 3068 blk_finish_plug(&plug); 3069 btrfs_set_log_full_commit(fs_info, trans); 3070 3071 if (ret != -ENOSPC) { 3072 btrfs_abort_transaction(trans, ret); 3073 mutex_unlock(&log_root_tree->log_mutex); 3074 goto out; 3075 } 3076 btrfs_wait_tree_log_extents(log, mark); 3077 mutex_unlock(&log_root_tree->log_mutex); 3078 ret = -EAGAIN; 3079 goto out; 3080 } 3081 3082 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 3083 blk_finish_plug(&plug); 3084 list_del_init(&root_log_ctx.list); 3085 mutex_unlock(&log_root_tree->log_mutex); 3086 ret = root_log_ctx.log_ret; 3087 goto out; 3088 } 3089 3090 index2 = root_log_ctx.log_transid % 2; 3091 if (atomic_read(&log_root_tree->log_commit[index2])) { 3092 blk_finish_plug(&plug); 3093 ret = btrfs_wait_tree_log_extents(log, mark); 3094 wait_log_commit(log_root_tree, 3095 root_log_ctx.log_transid); 3096 mutex_unlock(&log_root_tree->log_mutex); 3097 if (!ret) 3098 ret = root_log_ctx.log_ret; 3099 goto out; 3100 } 3101 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 3102 atomic_set(&log_root_tree->log_commit[index2], 1); 3103 3104 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 3105 wait_log_commit(log_root_tree, 3106 root_log_ctx.log_transid - 1); 3107 } 3108 3109 wait_for_writer(log_root_tree); 3110 3111 /* 3112 * now that we've moved on to the tree of log tree roots, 3113 * check the full commit flag again 3114 */ 3115 if (btrfs_need_log_full_commit(fs_info, trans)) { 3116 blk_finish_plug(&plug); 3117 btrfs_wait_tree_log_extents(log, mark); 3118 mutex_unlock(&log_root_tree->log_mutex); 3119 ret = -EAGAIN; 3120 goto out_wake_log_root; 3121 } 3122 3123 ret = btrfs_write_marked_extents(fs_info, 3124 &log_root_tree->dirty_log_pages, 3125 EXTENT_DIRTY | EXTENT_NEW); 3126 blk_finish_plug(&plug); 3127 if (ret) { 3128 btrfs_set_log_full_commit(fs_info, trans); 3129 btrfs_abort_transaction(trans, ret); 3130 mutex_unlock(&log_root_tree->log_mutex); 3131 goto out_wake_log_root; 3132 } 3133 ret = btrfs_wait_tree_log_extents(log, mark); 3134 if (!ret) 3135 ret = btrfs_wait_tree_log_extents(log_root_tree, 3136 EXTENT_NEW | EXTENT_DIRTY); 3137 if (ret) { 3138 btrfs_set_log_full_commit(fs_info, trans); 3139 mutex_unlock(&log_root_tree->log_mutex); 3140 goto out_wake_log_root; 3141 } 3142 3143 btrfs_set_super_log_root(fs_info->super_for_commit, 3144 log_root_tree->node->start); 3145 btrfs_set_super_log_root_level(fs_info->super_for_commit, 3146 btrfs_header_level(log_root_tree->node)); 3147 3148 log_root_tree->log_transid++; 3149 mutex_unlock(&log_root_tree->log_mutex); 3150 3151 /* 3152 * Nobody else is going to jump in and write the ctree 3153 * super here because the log_commit atomic 
below is protecting 3154 * us. We must be called with a transaction handle pinning 3155 * the running transaction open, so a full commit can't hop 3156 * in and cause problems either. 3157 */ 3158 ret = write_all_supers(fs_info, 1); 3159 if (ret) { 3160 btrfs_set_log_full_commit(fs_info, trans); 3161 btrfs_abort_transaction(trans, ret); 3162 goto out_wake_log_root; 3163 } 3164 3165 mutex_lock(&root->log_mutex); 3166 if (root->last_log_commit < log_transid) 3167 root->last_log_commit = log_transid; 3168 mutex_unlock(&root->log_mutex); 3169 3170 out_wake_log_root: 3171 mutex_lock(&log_root_tree->log_mutex); 3172 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 3173 3174 log_root_tree->log_transid_committed++; 3175 atomic_set(&log_root_tree->log_commit[index2], 0); 3176 mutex_unlock(&log_root_tree->log_mutex); 3177 3178 /* 3179 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3180 * all the updates above are seen by the woken threads. It might not be 3181 * necessary, but proving that seems to be hard. 3182 */ 3183 cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3184 out: 3185 mutex_lock(&root->log_mutex); 3186 btrfs_remove_all_log_ctxs(root, index1, ret); 3187 root->log_transid_committed++; 3188 atomic_set(&root->log_commit[index1], 0); 3189 mutex_unlock(&root->log_mutex); 3190 3191 /* 3192 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3193 * all the updates above are seen by the woken threads. It might not be 3194 * necessary, but proving that seems to be hard. 3195 */ 3196 cond_wake_up(&root->log_commit_wait[index1]); 3197 return ret; 3198 } 3199 3200 static void free_log_tree(struct btrfs_trans_handle *trans, 3201 struct btrfs_root *log) 3202 { 3203 int ret; 3204 struct walk_control wc = { 3205 .free = 1, 3206 .process_func = process_one_buffer 3207 }; 3208 3209 ret = walk_log_tree(trans, log, &wc); 3210 if (ret) { 3211 if (trans) 3212 btrfs_abort_transaction(trans, ret); 3213 else 3214 btrfs_handle_fs_error(log->fs_info, ret, NULL); 3215 } 3216 3217 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, 3218 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); 3219 free_extent_buffer(log->node); 3220 kfree(log); 3221 } 3222 3223 /* 3224 * free all the extents used by the tree log. This should be called 3225 * at commit time of the full transaction 3226 */ 3227 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 3228 { 3229 if (root->log_root) { 3230 free_log_tree(trans, root->log_root); 3231 root->log_root = NULL; 3232 } 3233 return 0; 3234 } 3235 3236 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 3237 struct btrfs_fs_info *fs_info) 3238 { 3239 if (fs_info->log_root_tree) { 3240 free_log_tree(trans, fs_info->log_root_tree); 3241 fs_info->log_root_tree = NULL; 3242 } 3243 return 0; 3244 } 3245 3246 /* 3247 * If both a file and directory are logged, and unlinks or renames are 3248 * mixed in, we have a few interesting corners: 3249 * 3250 * create file X in dir Y 3251 * link file X to X.link in dir Y 3252 * fsync file X 3253 * unlink file X but leave X.link 3254 * fsync dir Y 3255 * 3256 * After a crash we would expect only X.link to exist. But file X 3257 * didn't get fsync'd again so the log has back refs for X and X.link. 3258 * 3259 * We solve this by removing directory entries and inode backrefs from the 3260 * log when a file that was logged in the current transaction is 3261 * unlinked. 
Any later fsync will include the updated log entries, and 3262 * we'll be able to reconstruct the proper directory items from backrefs. 3263 * 3264 * This optimization allows us to avoid relogging the entire inode 3265 * or the entire directory. 3266 */ 3267 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 3268 struct btrfs_root *root, 3269 const char *name, int name_len, 3270 struct btrfs_inode *dir, u64 index) 3271 { 3272 struct btrfs_root *log; 3273 struct btrfs_dir_item *di; 3274 struct btrfs_path *path; 3275 int ret; 3276 int err = 0; 3277 int bytes_del = 0; 3278 u64 dir_ino = btrfs_ino(dir); 3279 3280 if (dir->logged_trans < trans->transid) 3281 return 0; 3282 3283 ret = join_running_log_trans(root); 3284 if (ret) 3285 return 0; 3286 3287 mutex_lock(&dir->log_mutex); 3288 3289 log = root->log_root; 3290 path = btrfs_alloc_path(); 3291 if (!path) { 3292 err = -ENOMEM; 3293 goto out_unlock; 3294 } 3295 3296 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 3297 name, name_len, -1); 3298 if (IS_ERR(di)) { 3299 err = PTR_ERR(di); 3300 goto fail; 3301 } 3302 if (di) { 3303 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3304 bytes_del += name_len; 3305 if (ret) { 3306 err = ret; 3307 goto fail; 3308 } 3309 } 3310 btrfs_release_path(path); 3311 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 3312 index, name, name_len, -1); 3313 if (IS_ERR(di)) { 3314 err = PTR_ERR(di); 3315 goto fail; 3316 } 3317 if (di) { 3318 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3319 bytes_del += name_len; 3320 if (ret) { 3321 err = ret; 3322 goto fail; 3323 } 3324 } 3325 3326 /* update the directory size in the log to reflect the names 3327 * we have removed 3328 */ 3329 if (bytes_del) { 3330 struct btrfs_key key; 3331 3332 key.objectid = dir_ino; 3333 key.offset = 0; 3334 key.type = BTRFS_INODE_ITEM_KEY; 3335 btrfs_release_path(path); 3336 3337 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 3338 if (ret < 0) { 3339 err = ret; 3340 goto fail; 3341 } 3342 if (ret == 0) { 3343 struct btrfs_inode_item *item; 3344 u64 i_size; 3345 3346 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3347 struct btrfs_inode_item); 3348 i_size = btrfs_inode_size(path->nodes[0], item); 3349 if (i_size > bytes_del) 3350 i_size -= bytes_del; 3351 else 3352 i_size = 0; 3353 btrfs_set_inode_size(path->nodes[0], item, i_size); 3354 btrfs_mark_buffer_dirty(path->nodes[0]); 3355 } else 3356 ret = 0; 3357 btrfs_release_path(path); 3358 } 3359 fail: 3360 btrfs_free_path(path); 3361 out_unlock: 3362 mutex_unlock(&dir->log_mutex); 3363 if (ret == -ENOSPC) { 3364 btrfs_set_log_full_commit(root->fs_info, trans); 3365 ret = 0; 3366 } else if (ret < 0) 3367 btrfs_abort_transaction(trans, ret); 3368 3369 btrfs_end_log_trans(root); 3370 3371 return err; 3372 } 3373 3374 /* see comments for btrfs_del_dir_entries_in_log */ 3375 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3376 struct btrfs_root *root, 3377 const char *name, int name_len, 3378 struct btrfs_inode *inode, u64 dirid) 3379 { 3380 struct btrfs_fs_info *fs_info = root->fs_info; 3381 struct btrfs_root *log; 3382 u64 index; 3383 int ret; 3384 3385 if (inode->logged_trans < trans->transid) 3386 return 0; 3387 3388 ret = join_running_log_trans(root); 3389 if (ret) 3390 return 0; 3391 log = root->log_root; 3392 mutex_lock(&inode->log_mutex); 3393 3394 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 3395 dirid, &index); 3396 mutex_unlock(&inode->log_mutex); 3397 if (ret == -ENOSPC) { 3398
btrfs_set_log_full_commit(fs_info, trans); 3399 ret = 0; 3400 } else if (ret < 0 && ret != -ENOENT) 3401 btrfs_abort_transaction(trans, ret); 3402 btrfs_end_log_trans(root); 3403 3404 return ret; 3405 } 3406 3407 /* 3408 * creates a range item in the log for 'dirid'. first_offset and 3409 * last_offset tell us which parts of the key space the log should 3410 * be considered authoritative for. 3411 */ 3412 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3413 struct btrfs_root *log, 3414 struct btrfs_path *path, 3415 int key_type, u64 dirid, 3416 u64 first_offset, u64 last_offset) 3417 { 3418 int ret; 3419 struct btrfs_key key; 3420 struct btrfs_dir_log_item *item; 3421 3422 key.objectid = dirid; 3423 key.offset = first_offset; 3424 if (key_type == BTRFS_DIR_ITEM_KEY) 3425 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3426 else 3427 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3428 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3429 if (ret) 3430 return ret; 3431 3432 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3433 struct btrfs_dir_log_item); 3434 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3435 btrfs_mark_buffer_dirty(path->nodes[0]); 3436 btrfs_release_path(path); 3437 return 0; 3438 } 3439 3440 /* 3441 * log all the items included in the current transaction for a given 3442 * directory. This also creates the range items in the log tree required 3443 * to replay anything deleted before the fsync 3444 */ 3445 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3446 struct btrfs_root *root, struct btrfs_inode *inode, 3447 struct btrfs_path *path, 3448 struct btrfs_path *dst_path, int key_type, 3449 struct btrfs_log_ctx *ctx, 3450 u64 min_offset, u64 *last_offset_ret) 3451 { 3452 struct btrfs_key min_key; 3453 struct btrfs_root *log = root->log_root; 3454 struct extent_buffer *src; 3455 int err = 0; 3456 int ret; 3457 int i; 3458 int nritems; 3459 u64 first_offset = min_offset; 3460 u64 last_offset = (u64)-1; 3461 u64 ino = btrfs_ino(inode); 3462 3463 log = root->log_root; 3464 3465 min_key.objectid = ino; 3466 min_key.type = key_type; 3467 min_key.offset = min_offset; 3468 3469 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3470 3471 /* 3472 * we didn't find anything from this transaction, see if there 3473 * is anything at all 3474 */ 3475 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3476 min_key.objectid = ino; 3477 min_key.type = key_type; 3478 min_key.offset = (u64)-1; 3479 btrfs_release_path(path); 3480 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3481 if (ret < 0) { 3482 btrfs_release_path(path); 3483 return ret; 3484 } 3485 ret = btrfs_previous_item(root, path, ino, key_type); 3486 3487 /* if ret == 0 there are items for this type, 3488 * create a range to tell us the last key of this type. 3489 * otherwise, there are no items in this directory after 3490 * *min_offset, and we create a range to indicate that. 
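 *
 * For example (illustrative offsets): with min_offset 0 and no items
 * of this type changed in the current transaction, if the last older
 * item of this type sits at offset 7, first_offset becomes 8 and the
 * logged range [8, (u64)-1] records that the directory has nothing
 * new beyond that point.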
3491 */ 3492 if (ret == 0) { 3493 struct btrfs_key tmp; 3494 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3495 path->slots[0]); 3496 if (key_type == tmp.type) 3497 first_offset = max(min_offset, tmp.offset) + 1; 3498 } 3499 goto done; 3500 } 3501 3502 /* go backward to find any previous key */ 3503 ret = btrfs_previous_item(root, path, ino, key_type); 3504 if (ret == 0) { 3505 struct btrfs_key tmp; 3506 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3507 if (key_type == tmp.type) { 3508 first_offset = tmp.offset; 3509 ret = overwrite_item(trans, log, dst_path, 3510 path->nodes[0], path->slots[0], 3511 &tmp); 3512 if (ret) { 3513 err = ret; 3514 goto done; 3515 } 3516 } 3517 } 3518 btrfs_release_path(path); 3519 3520 /* find the first key from this transaction again */ 3521 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3522 if (WARN_ON(ret != 0)) 3523 goto done; 3524 3525 /* 3526 * we have a block from this transaction, log every item in it 3527 * from our directory 3528 */ 3529 while (1) { 3530 struct btrfs_key tmp; 3531 src = path->nodes[0]; 3532 nritems = btrfs_header_nritems(src); 3533 for (i = path->slots[0]; i < nritems; i++) { 3534 struct btrfs_dir_item *di; 3535 3536 btrfs_item_key_to_cpu(src, &min_key, i); 3537 3538 if (min_key.objectid != ino || min_key.type != key_type) 3539 goto done; 3540 ret = overwrite_item(trans, log, dst_path, src, i, 3541 &min_key); 3542 if (ret) { 3543 err = ret; 3544 goto done; 3545 } 3546 3547 /* 3548 * We must make sure that when we log a directory entry, 3549 * the corresponding inode, after log replay, has a 3550 * matching link count. For example: 3551 * 3552 * touch foo 3553 * mkdir mydir 3554 * sync 3555 * ln foo mydir/bar 3556 * xfs_io -c "fsync" mydir 3557 * <crash> 3558 * <mount fs and log replay> 3559 * 3560 * Would result in a fsync log that when replayed, our 3561 * file inode would have a link count of 1, but we get 3562 * two directory entries pointing to the same inode. 3563 * After removing one of the names, it would not be 3564 * possible to remove the other name, which resulted 3565 * always in stale file handle errors, and would not 3566 * be possible to rmdir the parent directory, since 3567 * its i_size could never decrement to the value 3568 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors. 
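 *
 * Setting ctx->log_new_dentries below is what lets the caller know it
 * should also log the inodes these new dentries point to, keeping the
 * replayed link counts consistent with the directory contents.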
3569 */ 3570 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3571 btrfs_dir_item_key_to_cpu(src, di, &tmp); 3572 if (ctx && 3573 (btrfs_dir_transid(src, di) == trans->transid || 3574 btrfs_dir_type(src, di) == BTRFS_FT_DIR) && 3575 tmp.type != BTRFS_ROOT_ITEM_KEY) 3576 ctx->log_new_dentries = true; 3577 } 3578 path->slots[0] = nritems; 3579 3580 /* 3581 * look ahead to the next item and see if it is also 3582 * from this directory and from this transaction 3583 */ 3584 ret = btrfs_next_leaf(root, path); 3585 if (ret) { 3586 if (ret == 1) 3587 last_offset = (u64)-1; 3588 else 3589 err = ret; 3590 goto done; 3591 } 3592 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3593 if (tmp.objectid != ino || tmp.type != key_type) { 3594 last_offset = (u64)-1; 3595 goto done; 3596 } 3597 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3598 ret = overwrite_item(trans, log, dst_path, 3599 path->nodes[0], path->slots[0], 3600 &tmp); 3601 if (ret) 3602 err = ret; 3603 else 3604 last_offset = tmp.offset; 3605 goto done; 3606 } 3607 } 3608 done: 3609 btrfs_release_path(path); 3610 btrfs_release_path(dst_path); 3611 3612 if (err == 0) { 3613 *last_offset_ret = last_offset; 3614 /* 3615 * insert the log range keys to indicate where the log 3616 * is valid 3617 */ 3618 ret = insert_dir_log_key(trans, log, path, key_type, 3619 ino, first_offset, last_offset); 3620 if (ret) 3621 err = ret; 3622 } 3623 return err; 3624 } 3625 3626 /* 3627 * logging directories is very similar to logging inodes. We find all the items 3628 * from the current transaction and write them to the log. 3629 * 3630 * The recovery code scans the directory in the subvolume, and if it finds a 3631 * key in the range logged that is not present in the log tree, then it means 3632 * that dir entry was unlinked during the transaction. 3633 * 3634 * In order for that scan to work, we must include one key smaller than 3635 * the smallest logged by this transaction and one key larger than the largest 3636 * key logged by this transaction. 3637 */ 3638 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3639 struct btrfs_root *root, struct btrfs_inode *inode, 3640 struct btrfs_path *path, 3641 struct btrfs_path *dst_path, 3642 struct btrfs_log_ctx *ctx) 3643 { 3644 u64 min_key; 3645 u64 max_key; 3646 int ret; 3647 int key_type = BTRFS_DIR_ITEM_KEY; 3648 3649 again: 3650 min_key = 0; 3651 max_key = 0; 3652 while (1) { 3653 ret = log_dir_items(trans, root, inode, path, dst_path, key_type, 3654 ctx, min_key, &max_key); 3655 if (ret) 3656 return ret; 3657 if (max_key == (u64)-1) 3658 break; 3659 min_key = max_key + 1; 3660 } 3661 3662 if (key_type == BTRFS_DIR_ITEM_KEY) { 3663 key_type = BTRFS_DIR_INDEX_KEY; 3664 goto again; 3665 } 3666 return 0; 3667 } 3668 3669 /* 3670 * a helper function to drop items from the log before we relog an 3671 * inode. max_key_type indicates the highest item type to remove. 3672 * This cannot be run for file data extents because it does not 3673 * free the extents they point to.
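 *
 * (For example, when an inode is relogged in the same transaction,
 * stale copies of its logged items, up to max_key_type, are deleted
 * here before the fresh copies are inserted.)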
3674 */ 3675 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3676 struct btrfs_root *log, 3677 struct btrfs_path *path, 3678 u64 objectid, int max_key_type) 3679 { 3680 int ret; 3681 struct btrfs_key key; 3682 struct btrfs_key found_key; 3683 int start_slot; 3684 3685 key.objectid = objectid; 3686 key.type = max_key_type; 3687 key.offset = (u64)-1; 3688 3689 while (1) { 3690 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3691 BUG_ON(ret == 0); /* Logic error */ 3692 if (ret < 0) 3693 break; 3694 3695 if (path->slots[0] == 0) 3696 break; 3697 3698 path->slots[0]--; 3699 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3700 path->slots[0]); 3701 3702 if (found_key.objectid != objectid) 3703 break; 3704 3705 found_key.offset = 0; 3706 found_key.type = 0; 3707 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3708 &start_slot); 3709 3710 ret = btrfs_del_items(trans, log, path, start_slot, 3711 path->slots[0] - start_slot + 1); 3712 /* 3713 * If start slot isn't 0 then we don't need to re-search, we've 3714 * found the last guy with the objectid in this tree. 3715 */ 3716 if (ret || start_slot != 0) 3717 break; 3718 btrfs_release_path(path); 3719 } 3720 btrfs_release_path(path); 3721 if (ret > 0) 3722 ret = 0; 3723 return ret; 3724 } 3725 3726 static void fill_inode_item(struct btrfs_trans_handle *trans, 3727 struct extent_buffer *leaf, 3728 struct btrfs_inode_item *item, 3729 struct inode *inode, int log_inode_only, 3730 u64 logged_isize) 3731 { 3732 struct btrfs_map_token token; 3733 3734 btrfs_init_map_token(&token); 3735 3736 if (log_inode_only) { 3737 /* set the generation to zero so the recovery code 3738 * can tell the difference between logging 3739 * just to say 'this inode exists' and logging 3740 * to say 'update this inode with these values' 3741 */ 3742 btrfs_set_token_inode_generation(leaf, item, 0, &token); 3743 btrfs_set_token_inode_size(leaf, item, logged_isize, &token); 3744 } else { 3745 btrfs_set_token_inode_generation(leaf, item, 3746 BTRFS_I(inode)->generation, 3747 &token); 3748 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3749 } 3750 3751 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3752 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3753 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3754 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3755 3756 btrfs_set_token_timespec_sec(leaf, &item->atime, 3757 inode->i_atime.tv_sec, &token); 3758 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3759 inode->i_atime.tv_nsec, &token); 3760 3761 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3762 inode->i_mtime.tv_sec, &token); 3763 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3764 inode->i_mtime.tv_nsec, &token); 3765 3766 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3767 inode->i_ctime.tv_sec, &token); 3768 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3769 inode->i_ctime.tv_nsec, &token); 3770 3771 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3772 &token); 3773 3774 btrfs_set_token_inode_sequence(leaf, item, 3775 inode_peek_iversion(inode), &token); 3776 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3777 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3778 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3779 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3780 } 3781 3782 static int log_inode_item(struct btrfs_trans_handle *trans, 3783 struct btrfs_root
*log, struct btrfs_path *path, 3784 struct btrfs_inode *inode) 3785 { 3786 struct btrfs_inode_item *inode_item; 3787 int ret; 3788 3789 ret = btrfs_insert_empty_item(trans, log, path, 3790 &inode->location, sizeof(*inode_item)); 3791 if (ret && ret != -EEXIST) 3792 return ret; 3793 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3794 struct btrfs_inode_item); 3795 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, 3796 0, 0); 3797 btrfs_release_path(path); 3798 return 0; 3799 } 3800 3801 static noinline int copy_items(struct btrfs_trans_handle *trans, 3802 struct btrfs_inode *inode, 3803 struct btrfs_path *dst_path, 3804 struct btrfs_path *src_path, u64 *last_extent, 3805 int start_slot, int nr, int inode_only, 3806 u64 logged_isize) 3807 { 3808 struct btrfs_fs_info *fs_info = trans->fs_info; 3809 unsigned long src_offset; 3810 unsigned long dst_offset; 3811 struct btrfs_root *log = inode->root->log_root; 3812 struct btrfs_file_extent_item *extent; 3813 struct btrfs_inode_item *inode_item; 3814 struct extent_buffer *src = src_path->nodes[0]; 3815 struct btrfs_key first_key, last_key, key; 3816 int ret; 3817 struct btrfs_key *ins_keys; 3818 u32 *ins_sizes; 3819 char *ins_data; 3820 int i; 3821 struct list_head ordered_sums; 3822 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; 3823 bool has_extents = false; 3824 bool need_find_last_extent = true; 3825 bool done = false; 3826 3827 INIT_LIST_HEAD(&ordered_sums); 3828 3829 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3830 nr * sizeof(u32), GFP_NOFS); 3831 if (!ins_data) 3832 return -ENOMEM; 3833 3834 first_key.objectid = (u64)-1; 3835 3836 ins_sizes = (u32 *)ins_data; 3837 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3838 3839 for (i = 0; i < nr; i++) { 3840 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3841 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3842 } 3843 ret = btrfs_insert_empty_items(trans, log, dst_path, 3844 ins_keys, ins_sizes, nr); 3845 if (ret) { 3846 kfree(ins_data); 3847 return ret; 3848 } 3849 3850 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3851 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3852 dst_path->slots[0]); 3853 3854 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3855 3856 if (i == nr - 1) 3857 last_key = ins_keys[i]; 3858 3859 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3860 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3861 dst_path->slots[0], 3862 struct btrfs_inode_item); 3863 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3864 &inode->vfs_inode, 3865 inode_only == LOG_INODE_EXISTS, 3866 logged_isize); 3867 } else { 3868 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3869 src_offset, ins_sizes[i]); 3870 } 3871 3872 /* 3873 * We set need_find_last_extent here in case we know we were 3874 * processing other items and then walk into the first extent in 3875 * the inode. If we don't hit an extent then nothing changes, 3876 * we'll do the last search the next time around. 
3877 */ 3878 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 3879 has_extents = true; 3880 if (first_key.objectid == (u64)-1) 3881 first_key = ins_keys[i]; 3882 } else { 3883 need_find_last_extent = false; 3884 } 3885 3886 /* take a reference on file data extents so that truncates 3887 * or deletes of this inode don't have to relog the inode 3888 * again 3889 */ 3890 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && 3891 !skip_csum) { 3892 int found_type; 3893 extent = btrfs_item_ptr(src, start_slot + i, 3894 struct btrfs_file_extent_item); 3895 3896 if (btrfs_file_extent_generation(src, extent) < trans->transid) 3897 continue; 3898 3899 found_type = btrfs_file_extent_type(src, extent); 3900 if (found_type == BTRFS_FILE_EXTENT_REG) { 3901 u64 ds, dl, cs, cl; 3902 ds = btrfs_file_extent_disk_bytenr(src, 3903 extent); 3904 /* ds == 0 is a hole */ 3905 if (ds == 0) 3906 continue; 3907 3908 dl = btrfs_file_extent_disk_num_bytes(src, 3909 extent); 3910 cs = btrfs_file_extent_offset(src, extent); 3911 cl = btrfs_file_extent_num_bytes(src, 3912 extent); 3913 if (btrfs_file_extent_compression(src, 3914 extent)) { 3915 cs = 0; 3916 cl = dl; 3917 } 3918 3919 ret = btrfs_lookup_csums_range( 3920 fs_info->csum_root, 3921 ds + cs, ds + cs + cl - 1, 3922 &ordered_sums, 0); 3923 if (ret) { 3924 btrfs_release_path(dst_path); 3925 kfree(ins_data); 3926 return ret; 3927 } 3928 } 3929 } 3930 } 3931 3932 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 3933 btrfs_release_path(dst_path); 3934 kfree(ins_data); 3935 3936 /* 3937 * we have to do this after the loop above to avoid changing the 3938 * log tree while trying to change the log tree. 3939 */ 3940 ret = 0; 3941 while (!list_empty(&ordered_sums)) { 3942 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 3943 struct btrfs_ordered_sum, 3944 list); 3945 if (!ret) 3946 ret = btrfs_csum_file_blocks(trans, log, sums); 3947 list_del(&sums->list); 3948 kfree(sums); 3949 } 3950 3951 if (!has_extents) 3952 return ret; 3953 3954 if (need_find_last_extent && *last_extent == first_key.offset) { 3955 /* 3956 * We don't have any leafs between our current one and the one 3957 * we processed before that can have file extent items for our 3958 * inode (and have a generation number smaller than our current 3959 * transaction id). 3960 */ 3961 need_find_last_extent = false; 3962 } 3963 3964 /* 3965 * Because we use btrfs_search_forward we could skip leaves that were 3966 * not modified and then assume *last_extent is valid when it really 3967 * isn't. So back up to the previous leaf and read the end of the last 3968 * extent before we go and fill in holes. 
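 *
 * For example (illustrative): if the previous leaf ends with an
 * inline extent at offset 0 holding 100 bytes, *last_extent becomes
 * the ram bytes rounded up to the sector size; for a regular extent
 * it is simply key.offset plus the extent's num_bytes.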
3969 */ 3970 if (need_find_last_extent) { 3971 u64 len; 3972 3973 ret = btrfs_prev_leaf(inode->root, src_path); 3974 if (ret < 0) 3975 return ret; 3976 if (ret) 3977 goto fill_holes; 3978 if (src_path->slots[0]) 3979 src_path->slots[0]--; 3980 src = src_path->nodes[0]; 3981 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 3982 if (key.objectid != btrfs_ino(inode) || 3983 key.type != BTRFS_EXTENT_DATA_KEY) 3984 goto fill_holes; 3985 extent = btrfs_item_ptr(src, src_path->slots[0], 3986 struct btrfs_file_extent_item); 3987 if (btrfs_file_extent_type(src, extent) == 3988 BTRFS_FILE_EXTENT_INLINE) { 3989 len = btrfs_file_extent_ram_bytes(src, extent); 3990 *last_extent = ALIGN(key.offset + len, 3991 fs_info->sectorsize); 3992 } else { 3993 len = btrfs_file_extent_num_bytes(src, extent); 3994 *last_extent = key.offset + len; 3995 } 3996 } 3997 fill_holes: 3998 /* So we did prev_leaf, now we need to move to the next leaf, but a few 3999 * things could have happened 4000 * 4001 * 1) A merge could have happened, so we could currently be on a leaf 4002 * that holds what we were copying in the first place. 4003 * 2) A split could have happened, and now not all of the items we want 4004 * are on the same leaf. 4005 * 4006 * So we need to adjust how we search for holes, we need to drop the 4007 * path and re-search for the first extent key we found, and then walk 4008 * forward until we hit the last one we copied. 4009 */ 4010 if (need_find_last_extent) { 4011 /* btrfs_prev_leaf could return 1 without releasing the path */ 4012 btrfs_release_path(src_path); 4013 ret = btrfs_search_slot(NULL, inode->root, &first_key, 4014 src_path, 0, 0); 4015 if (ret < 0) 4016 return ret; 4017 ASSERT(ret == 0); 4018 src = src_path->nodes[0]; 4019 i = src_path->slots[0]; 4020 } else { 4021 i = start_slot; 4022 } 4023 4024 /* 4025 * Ok so here we need to go through and fill in any holes we may have 4026 * to make sure that holes are punched for those areas in case they had 4027 * extents previously. 4028 */ 4029 while (!done) { 4030 u64 offset, len; 4031 u64 extent_end; 4032 4033 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 4034 ret = btrfs_next_leaf(inode->root, src_path); 4035 if (ret < 0) 4036 return ret; 4037 ASSERT(ret == 0); 4038 src = src_path->nodes[0]; 4039 i = 0; 4040 need_find_last_extent = true; 4041 } 4042 4043 btrfs_item_key_to_cpu(src, &key, i); 4044 if (!btrfs_comp_cpu_keys(&key, &last_key)) 4045 done = true; 4046 if (key.objectid != btrfs_ino(inode) || 4047 key.type != BTRFS_EXTENT_DATA_KEY) { 4048 i++; 4049 continue; 4050 } 4051 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 4052 if (btrfs_file_extent_type(src, extent) == 4053 BTRFS_FILE_EXTENT_INLINE) { 4054 len = btrfs_file_extent_ram_bytes(src, extent); 4055 extent_end = ALIGN(key.offset + len, 4056 fs_info->sectorsize); 4057 } else { 4058 len = btrfs_file_extent_num_bytes(src, extent); 4059 extent_end = key.offset + len; 4060 } 4061 i++; 4062 4063 if (*last_extent == key.offset) { 4064 *last_extent = extent_end; 4065 continue; 4066 } 4067 offset = *last_extent; 4068 len = key.offset - *last_extent; 4069 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 4070 offset, 0, 0, len, 0, len, 0, 0, 0); 4071 if (ret) 4072 break; 4073 *last_extent = extent_end; 4074 } 4075 4076 /* 4077 * Check if there is a hole between the last extent found in our leaf 4078 * and the first extent in the next leaf. If there is one, we need to 4079 * log an explicit hole so that at replay time we can punch the hole. 
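 * For example (made up offsets): if the last extent we copied ends at
 * file offset 64K (*last_extent == 64K) and the first extent item in the
 * next leaf starts at offset 128K, we log a 64K hole at offset 64K as a
 * regular file extent item with a disk_bytenr of 0.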
4080 */ 4081 if (ret == 0 && 4082 key.objectid == btrfs_ino(inode) && 4083 key.type == BTRFS_EXTENT_DATA_KEY && 4084 i == btrfs_header_nritems(src_path->nodes[0])) { 4085 ret = btrfs_next_leaf(inode->root, src_path); 4086 need_find_last_extent = true; 4087 if (ret > 0) { 4088 ret = 0; 4089 } else if (ret == 0) { 4090 btrfs_item_key_to_cpu(src_path->nodes[0], &key, 4091 src_path->slots[0]); 4092 if (key.objectid == btrfs_ino(inode) && 4093 key.type == BTRFS_EXTENT_DATA_KEY && 4094 *last_extent < key.offset) { 4095 const u64 len = key.offset - *last_extent; 4096 4097 ret = btrfs_insert_file_extent(trans, log, 4098 btrfs_ino(inode), 4099 *last_extent, 0, 4100 0, len, 0, len, 4101 0, 0, 0); 4102 } 4103 } 4104 } 4105 /* 4106 * Need to let the callers know we dropped the path so they should 4107 * re-search. 4108 */ 4109 if (!ret && need_find_last_extent) 4110 ret = 1; 4111 return ret; 4112 } 4113 4114 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 4115 { 4116 struct extent_map *em1, *em2; 4117 4118 em1 = list_entry(a, struct extent_map, list); 4119 em2 = list_entry(b, struct extent_map, list); 4120 4121 if (em1->start < em2->start) 4122 return -1; 4123 else if (em1->start > em2->start) 4124 return 1; 4125 return 0; 4126 } 4127 4128 static int log_extent_csums(struct btrfs_trans_handle *trans, 4129 struct btrfs_inode *inode, 4130 struct btrfs_root *log_root, 4131 const struct extent_map *em) 4132 { 4133 u64 csum_offset; 4134 u64 csum_len; 4135 LIST_HEAD(ordered_sums); 4136 int ret = 0; 4137 4138 if (inode->flags & BTRFS_INODE_NODATASUM || 4139 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 4140 em->block_start == EXTENT_MAP_HOLE) 4141 return 0; 4142 4143 /* If we're compressed we have to save the entire range of csums. */ 4144 if (em->compress_type) { 4145 csum_offset = 0; 4146 csum_len = max(em->block_len, em->orig_block_len); 4147 } else { 4148 csum_offset = em->mod_start - em->start; 4149 csum_len = em->mod_len; 4150 } 4151 4152 /* block start is already adjusted for the file extent offset. 
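 * For example (illustrative numbers only): for an uncompressed extent
 * map with em->start == 0, em->block_start == 1M, em->mod_start == 64K
 * and em->mod_len == 4K, the lookup below covers the disk byte range
 * [1M + 64K, 1M + 64K + 4K - 1].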
*/ 4153 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root, 4154 em->block_start + csum_offset, 4155 em->block_start + csum_offset + 4156 csum_len - 1, &ordered_sums, 0); 4157 if (ret) 4158 return ret; 4159 4160 while (!list_empty(&ordered_sums)) { 4161 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4162 struct btrfs_ordered_sum, 4163 list); 4164 if (!ret) 4165 ret = btrfs_csum_file_blocks(trans, log_root, sums); 4166 list_del(&sums->list); 4167 kfree(sums); 4168 } 4169 4170 return ret; 4171 } 4172 4173 static int log_one_extent(struct btrfs_trans_handle *trans, 4174 struct btrfs_inode *inode, struct btrfs_root *root, 4175 const struct extent_map *em, 4176 struct btrfs_path *path, 4177 struct btrfs_log_ctx *ctx) 4178 { 4179 struct btrfs_root *log = root->log_root; 4180 struct btrfs_file_extent_item *fi; 4181 struct extent_buffer *leaf; 4182 struct btrfs_map_token token; 4183 struct btrfs_key key; 4184 u64 extent_offset = em->start - em->orig_start; 4185 u64 block_len; 4186 int ret; 4187 int extent_inserted = 0; 4188 4189 ret = log_extent_csums(trans, inode, log, em); 4190 if (ret) 4191 return ret; 4192 4193 btrfs_init_map_token(&token); 4194 4195 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, 4196 em->start + em->len, NULL, 0, 1, 4197 sizeof(*fi), &extent_inserted); 4198 if (ret) 4199 return ret; 4200 4201 if (!extent_inserted) { 4202 key.objectid = btrfs_ino(inode); 4203 key.type = BTRFS_EXTENT_DATA_KEY; 4204 key.offset = em->start; 4205 4206 ret = btrfs_insert_empty_item(trans, log, path, &key, 4207 sizeof(*fi)); 4208 if (ret) 4209 return ret; 4210 } 4211 leaf = path->nodes[0]; 4212 fi = btrfs_item_ptr(leaf, path->slots[0], 4213 struct btrfs_file_extent_item); 4214 4215 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4216 &token); 4217 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4218 btrfs_set_token_file_extent_type(leaf, fi, 4219 BTRFS_FILE_EXTENT_PREALLOC, 4220 &token); 4221 else 4222 btrfs_set_token_file_extent_type(leaf, fi, 4223 BTRFS_FILE_EXTENT_REG, 4224 &token); 4225 4226 block_len = max(em->block_len, em->orig_block_len); 4227 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4228 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4229 em->block_start, 4230 &token); 4231 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4232 &token); 4233 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4234 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4235 em->block_start - 4236 extent_offset, &token); 4237 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4238 &token); 4239 } else { 4240 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4241 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4242 &token); 4243 } 4244 4245 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4246 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4247 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4248 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4249 &token); 4250 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4251 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4252 btrfs_mark_buffer_dirty(leaf); 4253 4254 btrfs_release_path(path); 4255 4256 return ret; 4257 } 4258 4259 /* 4260 * Log all prealloc extents beyond the inode's i_size to make sure we do not 4261 * lose them after doing a fast fsync and replaying the log. 
We scan the 4262 * subvolume's root instead of iterating the inode's extent map tree because 4263 * otherwise we can log incorrect extent items based on extent map conversion. 4264 * That can happen due to the fact that extent maps are merged when they 4265 * are not in the extent map tree's list of modified extents. 4266 */ 4267 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, 4268 struct btrfs_inode *inode, 4269 struct btrfs_path *path) 4270 { 4271 struct btrfs_root *root = inode->root; 4272 struct btrfs_key key; 4273 const u64 i_size = i_size_read(&inode->vfs_inode); 4274 const u64 ino = btrfs_ino(inode); 4275 struct btrfs_path *dst_path = NULL; 4276 u64 last_extent = (u64)-1; 4277 int ins_nr = 0; 4278 int start_slot; 4279 int ret; 4280 4281 if (!(inode->flags & BTRFS_INODE_PREALLOC)) 4282 return 0; 4283 4284 key.objectid = ino; 4285 key.type = BTRFS_EXTENT_DATA_KEY; 4286 key.offset = i_size; 4287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4288 if (ret < 0) 4289 goto out; 4290 4291 while (true) { 4292 struct extent_buffer *leaf = path->nodes[0]; 4293 int slot = path->slots[0]; 4294 4295 if (slot >= btrfs_header_nritems(leaf)) { 4296 if (ins_nr > 0) { 4297 ret = copy_items(trans, inode, dst_path, path, 4298 &last_extent, start_slot, 4299 ins_nr, 1, 0); 4300 if (ret < 0) 4301 goto out; 4302 ins_nr = 0; 4303 } 4304 ret = btrfs_next_leaf(root, path); 4305 if (ret < 0) 4306 goto out; 4307 if (ret > 0) { 4308 ret = 0; 4309 break; 4310 } 4311 continue; 4312 } 4313 4314 btrfs_item_key_to_cpu(leaf, &key, slot); 4315 if (key.objectid > ino) 4316 break; 4317 if (WARN_ON_ONCE(key.objectid < ino) || 4318 key.type < BTRFS_EXTENT_DATA_KEY || 4319 key.offset < i_size) { 4320 path->slots[0]++; 4321 continue; 4322 } 4323 if (last_extent == (u64)-1) { 4324 last_extent = key.offset; 4325 /* 4326 * Avoid logging extent items logged in past fsync calls 4327 * and leading to duplicate keys in the log tree. 4328 */ 4329 do { 4330 ret = btrfs_truncate_inode_items(trans, 4331 root->log_root, 4332 &inode->vfs_inode, 4333 i_size, 4334 BTRFS_EXTENT_DATA_KEY); 4335 } while (ret == -EAGAIN); 4336 if (ret) 4337 goto out; 4338 } 4339 if (ins_nr == 0) 4340 start_slot = slot; 4341 ins_nr++; 4342 path->slots[0]++; 4343 if (!dst_path) { 4344 dst_path = btrfs_alloc_path(); 4345 if (!dst_path) { 4346 ret = -ENOMEM; 4347 goto out; 4348 } 4349 } 4350 } 4351 if (ins_nr > 0) { 4352 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4353 start_slot, ins_nr, 1, 0); 4354 if (ret > 0) 4355 ret = 0; 4356 } 4357 out: 4358 btrfs_release_path(path); 4359 btrfs_free_path(dst_path); 4360 return ret; 4361 } 4362 4363 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4364 struct btrfs_root *root, 4365 struct btrfs_inode *inode, 4366 struct btrfs_path *path, 4367 struct btrfs_log_ctx *ctx, 4368 const u64 start, 4369 const u64 end) 4370 { 4371 struct extent_map *em, *n; 4372 struct list_head extents; 4373 struct extent_map_tree *tree = &inode->extent_tree; 4374 u64 test_gen; 4375 int ret = 0; 4376 int num = 0; 4377 4378 INIT_LIST_HEAD(&extents); 4379 4380 write_lock(&tree->lock); 4381 test_gen = root->fs_info->last_trans_committed; 4382 4383 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4384 /* 4385 * Skip extents outside our logging range. 
It's important to do 4386 * it for correctness because if we don't ignore them, we may 4387 * log them before their ordered extent completes, and therefore 4388 * we could log them without logging their respective checksums 4389 * (the checksum items are added to the csum tree at the very 4390 * end of btrfs_finish_ordered_io()). Also leave such extents 4391 * outside of our range in the list, since we may have another 4392 * ranged fsync in the near future that needs them. If an extent 4393 * outside our range corresponds to a hole, log it to avoid 4394 * leaving gaps between extents (fsck will complain when we are 4395 * not using the NO_HOLES feature). 4396 */ 4397 if ((em->start > end || em->start + em->len <= start) && 4398 em->block_start != EXTENT_MAP_HOLE) 4399 continue; 4400 4401 list_del_init(&em->list); 4402 /* 4403 * Just an arbitrary number, this can be really CPU intensive 4404 * once we start getting a lot of extents, and really once we 4405 * have a bunch of extents we just want to commit since it will 4406 * be faster. 4407 */ 4408 if (++num > 32768) { 4409 list_del_init(&tree->modified_extents); 4410 ret = -EFBIG; 4411 goto process; 4412 } 4413 4414 if (em->generation <= test_gen) 4415 continue; 4416 4417 /* We log prealloc extents beyond eof later. */ 4418 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && 4419 em->start >= i_size_read(&inode->vfs_inode)) 4420 continue; 4421 4422 /* Need a ref to keep it from getting evicted from cache */ 4423 refcount_inc(&em->refs); 4424 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4425 list_add_tail(&em->list, &extents); 4426 num++; 4427 } 4428 4429 list_sort(NULL, &extents, extent_cmp); 4430 process: 4431 while (!list_empty(&extents)) { 4432 em = list_entry(extents.next, struct extent_map, list); 4433 4434 list_del_init(&em->list); 4435 4436 /* 4437 * If we had an error we just need to delete everybody from our 4438 * private list. 4439 */ 4440 if (ret) { 4441 clear_em_logging(tree, em); 4442 free_extent_map(em); 4443 continue; 4444 } 4445 4446 write_unlock(&tree->lock); 4447 4448 ret = log_one_extent(trans, inode, root, em, path, ctx); 4449 write_lock(&tree->lock); 4450 clear_em_logging(tree, em); 4451 free_extent_map(em); 4452 } 4453 WARN_ON(!list_empty(&extents)); 4454 write_unlock(&tree->lock); 4455 4456 btrfs_release_path(path); 4457 if (!ret) 4458 ret = btrfs_log_prealloc_extents(trans, inode, path); 4459 4460 return ret; 4461 } 4462 4463 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, 4464 struct btrfs_path *path, u64 *size_ret) 4465 { 4466 struct btrfs_key key; 4467 int ret; 4468 4469 key.objectid = btrfs_ino(inode); 4470 key.type = BTRFS_INODE_ITEM_KEY; 4471 key.offset = 0; 4472 4473 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); 4474 if (ret < 0) { 4475 return ret; 4476 } else if (ret > 0) { 4477 *size_ret = 0; 4478 } else { 4479 struct btrfs_inode_item *item; 4480 4481 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4482 struct btrfs_inode_item); 4483 *size_ret = btrfs_inode_size(path->nodes[0], item); 4484 } 4485 4486 btrfs_release_path(path); 4487 return 0; 4488 } 4489 4490 /* 4491 * At the moment we always log all xattrs. This is to figure out at log replay 4492 * time which xattrs must have their deletion replayed. If a xattr is missing 4493 * in the log tree and exists in the fs/subvol tree, we delete it. 
This is 4494 * because if a xattr is deleted, the inode is fsynced and a power failure 4495 * happens, causing the log to be replayed the next time the fs is mounted, 4496 * we want the xattr to not exist anymore (same behaviour as other filesystems 4497 * with a journal, ext3/4, xfs, f2fs, etc). 4498 */ 4499 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 4500 struct btrfs_root *root, 4501 struct btrfs_inode *inode, 4502 struct btrfs_path *path, 4503 struct btrfs_path *dst_path) 4504 { 4505 int ret; 4506 struct btrfs_key key; 4507 const u64 ino = btrfs_ino(inode); 4508 int ins_nr = 0; 4509 int start_slot = 0; 4510 4511 key.objectid = ino; 4512 key.type = BTRFS_XATTR_ITEM_KEY; 4513 key.offset = 0; 4514 4515 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4516 if (ret < 0) 4517 return ret; 4518 4519 while (true) { 4520 int slot = path->slots[0]; 4521 struct extent_buffer *leaf = path->nodes[0]; 4522 int nritems = btrfs_header_nritems(leaf); 4523 4524 if (slot >= nritems) { 4525 if (ins_nr > 0) { 4526 u64 last_extent = 0; 4527 4528 ret = copy_items(trans, inode, dst_path, path, 4529 &last_extent, start_slot, 4530 ins_nr, 1, 0); 4531 /* can't be 1, extent items aren't processed */ 4532 ASSERT(ret <= 0); 4533 if (ret < 0) 4534 return ret; 4535 ins_nr = 0; 4536 } 4537 ret = btrfs_next_leaf(root, path); 4538 if (ret < 0) 4539 return ret; 4540 else if (ret > 0) 4541 break; 4542 continue; 4543 } 4544 4545 btrfs_item_key_to_cpu(leaf, &key, slot); 4546 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 4547 break; 4548 4549 if (ins_nr == 0) 4550 start_slot = slot; 4551 ins_nr++; 4552 path->slots[0]++; 4553 cond_resched(); 4554 } 4555 if (ins_nr > 0) { 4556 u64 last_extent = 0; 4557 4558 ret = copy_items(trans, inode, dst_path, path, 4559 &last_extent, start_slot, 4560 ins_nr, 1, 0); 4561 /* can't be 1, extent items aren't processed */ 4562 ASSERT(ret <= 0); 4563 if (ret < 0) 4564 return ret; 4565 } 4566 4567 return 0; 4568 } 4569 4570 /* 4571 * If the no holes feature is enabled we need to make sure any hole between the 4572 * last extent and the i_size of our inode is explicitly marked in the log. This 4573 * is to make sure that doing something like: 4574 * 4575 * 1) create file with 128Kb of data 4576 * 2) truncate file to 64Kb 4577 * 3) truncate file to 256Kb 4578 * 4) fsync file 4579 * 5) <crash/power failure> 4580 * 6) mount fs and trigger log replay 4581 * 4582 * Will give us a file with a size of 256Kb, the first 64Kb of data match what 4583 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the 4584 * file correspond to a hole. The presence of explicit holes in a log tree is 4585 * what guarantees that log replay will remove/adjust file extent items in the 4586 * fs/subvol tree. 4587 * 4588 * Here we do not need to care about holes between extents, that is already done 4589 * by copy_items(). We also only need to do this in the full sync path, where we 4590 * lookup for extents from the fs/subvol tree only. In the fast path case, we 4591 * lookup the list of modified extent maps and if any represents a hole, we 4592 * insert a corresponding extent representing a hole in the log tree. 
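 * For example (illustrative numbers): with an i_size of 200K and the
 * last extent ending at file offset 100K, we log a hole at offset 100K
 * with a length of 100K (rounded up to the sector size) as a regular
 * file extent item whose disk_bytenr is 0, which is how holes are
 * represented on disk.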
4593 */ 4594 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, 4595 struct btrfs_root *root, 4596 struct btrfs_inode *inode, 4597 struct btrfs_path *path) 4598 { 4599 struct btrfs_fs_info *fs_info = root->fs_info; 4600 int ret; 4601 struct btrfs_key key; 4602 u64 hole_start; 4603 u64 hole_size; 4604 struct extent_buffer *leaf; 4605 struct btrfs_root *log = root->log_root; 4606 const u64 ino = btrfs_ino(inode); 4607 const u64 i_size = i_size_read(&inode->vfs_inode); 4608 4609 if (!btrfs_fs_incompat(fs_info, NO_HOLES)) 4610 return 0; 4611 4612 key.objectid = ino; 4613 key.type = BTRFS_EXTENT_DATA_KEY; 4614 key.offset = (u64)-1; 4615 4616 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4617 ASSERT(ret != 0); 4618 if (ret < 0) 4619 return ret; 4620 4621 ASSERT(path->slots[0] > 0); 4622 path->slots[0]--; 4623 leaf = path->nodes[0]; 4624 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4625 4626 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { 4627 /* inode does not have any extents */ 4628 hole_start = 0; 4629 hole_size = i_size; 4630 } else { 4631 struct btrfs_file_extent_item *extent; 4632 u64 len; 4633 4634 /* 4635 * If there's an extent beyond i_size, an explicit hole was 4636 * already inserted by copy_items(). 4637 */ 4638 if (key.offset >= i_size) 4639 return 0; 4640 4641 extent = btrfs_item_ptr(leaf, path->slots[0], 4642 struct btrfs_file_extent_item); 4643 4644 if (btrfs_file_extent_type(leaf, extent) == 4645 BTRFS_FILE_EXTENT_INLINE) { 4646 len = btrfs_file_extent_ram_bytes(leaf, extent); 4647 ASSERT(len == i_size || 4648 (len == fs_info->sectorsize && 4649 btrfs_file_extent_compression(leaf, extent) != 4650 BTRFS_COMPRESS_NONE) || 4651 (len < i_size && i_size < fs_info->sectorsize)); 4652 return 0; 4653 } 4654 4655 len = btrfs_file_extent_num_bytes(leaf, extent); 4656 /* Last extent goes beyond i_size, no need to log a hole. */ 4657 if (key.offset + len > i_size) 4658 return 0; 4659 hole_start = key.offset + len; 4660 hole_size = i_size - hole_start; 4661 } 4662 btrfs_release_path(path); 4663 4664 /* Last extent ends at i_size. */ 4665 if (hole_size == 0) 4666 return 0; 4667 4668 hole_size = ALIGN(hole_size, fs_info->sectorsize); 4669 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, 4670 hole_size, 0, hole_size, 0, 0, 0); 4671 return ret; 4672 } 4673 4674 /* 4675 * When we are logging a new inode X, check if it doesn't have a reference that 4676 * matches the reference from some other inode Y created in a past transaction 4677 * and that was renamed in the current transaction. If we don't do this, then at 4678 * log replay time we can lose inode Y (and all its files if it's a directory): 4679 * 4680 * mkdir /mnt/x 4681 * echo "hello world" > /mnt/x/foobar 4682 * sync 4683 * mv /mnt/x /mnt/y 4684 * mkdir /mnt/x # or touch /mnt/x 4685 * xfs_io -c fsync /mnt/x 4686 * <power fail> 4687 * mount fs, trigger log replay 4688 * 4689 * After the log replay procedure, we would lose the first directory and all its 4690 * files (file foobar). 
4691 * For the case where inode Y is not a directory we simply end up losing it: 4692 * 4693 * echo "123" > /mnt/foo 4694 * sync 4695 * mv /mnt/foo /mnt/bar 4696 * echo "abc" > /mnt/foo 4697 * xfs_io -c fsync /mnt/foo 4698 * <power fail> 4699 * 4700 * We also need this for cases where a snapshot entry is replaced by some other 4701 * entry (file or directory) otherwise we end up with an unreplayable log due to 4702 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 4703 * if it were a regular entry: 4704 * 4705 * mkdir /mnt/x 4706 * btrfs subvolume snapshot /mnt /mnt/x/snap 4707 * btrfs subvolume delete /mnt/x/snap 4708 * rmdir /mnt/x 4709 * mkdir /mnt/x 4710 * fsync /mnt/x or fsync some new file inside it 4711 * <power fail> 4712 * 4713 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 4714 * the same transaction. 4715 */ 4716 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4717 const int slot, 4718 const struct btrfs_key *key, 4719 struct btrfs_inode *inode, 4720 u64 *other_ino) 4721 { 4722 int ret; 4723 struct btrfs_path *search_path; 4724 char *name = NULL; 4725 u32 name_len = 0; 4726 u32 item_size = btrfs_item_size_nr(eb, slot); 4727 u32 cur_offset = 0; 4728 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4729 4730 search_path = btrfs_alloc_path(); 4731 if (!search_path) 4732 return -ENOMEM; 4733 search_path->search_commit_root = 1; 4734 search_path->skip_locking = 1; 4735 4736 while (cur_offset < item_size) { 4737 u64 parent; 4738 u32 this_name_len; 4739 u32 this_len; 4740 unsigned long name_ptr; 4741 struct btrfs_dir_item *di; 4742 4743 if (key->type == BTRFS_INODE_REF_KEY) { 4744 struct btrfs_inode_ref *iref; 4745 4746 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4747 parent = key->offset; 4748 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4749 name_ptr = (unsigned long)(iref + 1); 4750 this_len = sizeof(*iref) + this_name_len; 4751 } else { 4752 struct btrfs_inode_extref *extref; 4753 4754 extref = (struct btrfs_inode_extref *)(ptr + 4755 cur_offset); 4756 parent = btrfs_inode_extref_parent(eb, extref); 4757 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4758 name_ptr = (unsigned long)&extref->name; 4759 this_len = sizeof(*extref) + this_name_len; 4760 } 4761 4762 if (this_name_len > name_len) { 4763 char *new_name; 4764 4765 new_name = krealloc(name, this_name_len, GFP_NOFS); 4766 if (!new_name) { 4767 ret = -ENOMEM; 4768 goto out; 4769 } 4770 name_len = this_name_len; 4771 name = new_name; 4772 } 4773 4774 read_extent_buffer(eb, name, name_ptr, this_name_len); 4775 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, 4776 parent, name, this_name_len, 0); 4777 if (di && !IS_ERR(di)) { 4778 struct btrfs_key di_key; 4779 4780 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4781 di, &di_key); 4782 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4783 ret = 1; 4784 *other_ino = di_key.objectid; 4785 } else { 4786 ret = -EAGAIN; 4787 } 4788 goto out; 4789 } else if (IS_ERR(di)) { 4790 ret = PTR_ERR(di); 4791 goto out; 4792 } 4793 btrfs_release_path(search_path); 4794 4795 cur_offset += this_len; 4796 } 4797 ret = 0; 4798 out: 4799 btrfs_free_path(search_path); 4800 kfree(name); 4801 return ret; 4802 } 4803 4804 /* log a single inode in the tree log. 4805 * At least one parent directory for this inode must exist in the tree 4806 * or be logged already. 4807 * 4808 * Any items from this inode changed by the current transaction are copied 4809 * to the log tree. 
An extra reference is taken on any extents in this 4810 * file, allowing us to avoid a whole pile of corner cases around logging 4811 * blocks that have been removed from the tree. 4812 * 4813 * See LOG_INODE_ALL and related defines for a description of what inode_only 4814 * does. 4815 * 4816 * This handles both files and directories. 4817 */ 4818 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 4819 struct btrfs_root *root, struct btrfs_inode *inode, 4820 int inode_only, 4821 const loff_t start, 4822 const loff_t end, 4823 struct btrfs_log_ctx *ctx) 4824 { 4825 struct btrfs_fs_info *fs_info = root->fs_info; 4826 struct btrfs_path *path; 4827 struct btrfs_path *dst_path; 4828 struct btrfs_key min_key; 4829 struct btrfs_key max_key; 4830 struct btrfs_root *log = root->log_root; 4831 u64 last_extent = 0; 4832 int err = 0; 4833 int ret; 4834 int nritems; 4835 int ins_start_slot = 0; 4836 int ins_nr; 4837 bool fast_search = false; 4838 u64 ino = btrfs_ino(inode); 4839 struct extent_map_tree *em_tree = &inode->extent_tree; 4840 u64 logged_isize = 0; 4841 bool need_log_inode_item = true; 4842 bool xattrs_logged = false; 4843 4844 path = btrfs_alloc_path(); 4845 if (!path) 4846 return -ENOMEM; 4847 dst_path = btrfs_alloc_path(); 4848 if (!dst_path) { 4849 btrfs_free_path(path); 4850 return -ENOMEM; 4851 } 4852 4853 min_key.objectid = ino; 4854 min_key.type = BTRFS_INODE_ITEM_KEY; 4855 min_key.offset = 0; 4856 4857 max_key.objectid = ino; 4858 4859 4860 /* today the code can only do partial logging of directories */ 4861 if (S_ISDIR(inode->vfs_inode.i_mode) || 4862 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4863 &inode->runtime_flags) && 4864 inode_only >= LOG_INODE_EXISTS)) 4865 max_key.type = BTRFS_XATTR_ITEM_KEY; 4866 else 4867 max_key.type = (u8)-1; 4868 max_key.offset = (u64)-1; 4869 4870 /* 4871 * Only run delayed items if we are a dir or a new file. 4872 * Otherwise commit the delayed inode only, which is needed in 4873 * order for the log replay code to mark inodes for link count 4874 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 4875 */ 4876 if (S_ISDIR(inode->vfs_inode.i_mode) || 4877 inode->generation > fs_info->last_trans_committed) 4878 ret = btrfs_commit_inode_delayed_items(trans, inode); 4879 else 4880 ret = btrfs_commit_inode_delayed_inode(inode); 4881 4882 if (ret) { 4883 btrfs_free_path(path); 4884 btrfs_free_path(dst_path); 4885 return ret; 4886 } 4887 4888 if (inode_only == LOG_OTHER_INODE) { 4889 inode_only = LOG_INODE_EXISTS; 4890 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING); 4891 } else { 4892 mutex_lock(&inode->log_mutex); 4893 } 4894 4895 /* 4896 * a brute force approach to making sure we get the most uptodate 4897 * copies of everything. 4898 */ 4899 if (S_ISDIR(inode->vfs_inode.i_mode)) { 4900 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 4901 4902 if (inode_only == LOG_INODE_EXISTS) 4903 max_key_type = BTRFS_XATTR_ITEM_KEY; 4904 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 4905 } else { 4906 if (inode_only == LOG_INODE_EXISTS) { 4907 /* 4908 * Make sure the new inode item we write to the log has 4909 * the same isize as the current one (if it exists). 4910 * This is necessary to prevent data loss after log 4911 * replay, and also to prevent doing a wrong expanding 4912 * truncate - for e.g. 
create file, write 4K into offset 4913 * 0, fsync, write 4K into offset 4096, add hard link, 4914 * fsync some other file (to sync log), power fail - if 4915 * we use the inode's current i_size, after log replay 4916 * we get a 8Kb file, with the last 4Kb extent as a hole 4917 * (zeroes), as if an expanding truncate happened, 4918 * instead of getting a file of 4Kb only. 4919 */ 4920 err = logged_inode_size(log, inode, path, &logged_isize); 4921 if (err) 4922 goto out_unlock; 4923 } 4924 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4925 &inode->runtime_flags)) { 4926 if (inode_only == LOG_INODE_EXISTS) { 4927 max_key.type = BTRFS_XATTR_ITEM_KEY; 4928 ret = drop_objectid_items(trans, log, path, ino, 4929 max_key.type); 4930 } else { 4931 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4932 &inode->runtime_flags); 4933 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 4934 &inode->runtime_flags); 4935 while(1) { 4936 ret = btrfs_truncate_inode_items(trans, 4937 log, &inode->vfs_inode, 0, 0); 4938 if (ret != -EAGAIN) 4939 break; 4940 } 4941 } 4942 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 4943 &inode->runtime_flags) || 4944 inode_only == LOG_INODE_EXISTS) { 4945 if (inode_only == LOG_INODE_ALL) 4946 fast_search = true; 4947 max_key.type = BTRFS_XATTR_ITEM_KEY; 4948 ret = drop_objectid_items(trans, log, path, ino, 4949 max_key.type); 4950 } else { 4951 if (inode_only == LOG_INODE_ALL) 4952 fast_search = true; 4953 goto log_extents; 4954 } 4955 4956 } 4957 if (ret) { 4958 err = ret; 4959 goto out_unlock; 4960 } 4961 4962 while (1) { 4963 ins_nr = 0; 4964 ret = btrfs_search_forward(root, &min_key, 4965 path, trans->transid); 4966 if (ret < 0) { 4967 err = ret; 4968 goto out_unlock; 4969 } 4970 if (ret != 0) 4971 break; 4972 again: 4973 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 4974 if (min_key.objectid != ino) 4975 break; 4976 if (min_key.type > max_key.type) 4977 break; 4978 4979 if (min_key.type == BTRFS_INODE_ITEM_KEY) 4980 need_log_inode_item = false; 4981 4982 if ((min_key.type == BTRFS_INODE_REF_KEY || 4983 min_key.type == BTRFS_INODE_EXTREF_KEY) && 4984 inode->generation == trans->transid) { 4985 u64 other_ino = 0; 4986 4987 ret = btrfs_check_ref_name_override(path->nodes[0], 4988 path->slots[0], &min_key, inode, 4989 &other_ino); 4990 if (ret < 0) { 4991 err = ret; 4992 goto out_unlock; 4993 } else if (ret > 0 && ctx && 4994 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { 4995 struct btrfs_key inode_key; 4996 struct inode *other_inode; 4997 4998 if (ins_nr > 0) { 4999 ins_nr++; 5000 } else { 5001 ins_nr = 1; 5002 ins_start_slot = path->slots[0]; 5003 } 5004 ret = copy_items(trans, inode, dst_path, path, 5005 &last_extent, ins_start_slot, 5006 ins_nr, inode_only, 5007 logged_isize); 5008 if (ret < 0) { 5009 err = ret; 5010 goto out_unlock; 5011 } 5012 ins_nr = 0; 5013 btrfs_release_path(path); 5014 inode_key.objectid = other_ino; 5015 inode_key.type = BTRFS_INODE_ITEM_KEY; 5016 inode_key.offset = 0; 5017 other_inode = btrfs_iget(fs_info->sb, 5018 &inode_key, root, 5019 NULL); 5020 /* 5021 * If the other inode that had a conflicting dir 5022 * entry was deleted in the current transaction, 5023 * we don't need to do more work nor fallback to 5024 * a transaction commit. 
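 * (For example, one possible way to hit this: the conflicting name used
 * to belong to an inode that was unlinked and deleted earlier in this
 * same transaction, so the btrfs_iget() call above fails with -ENOENT
 * and we simply move on to the next key.)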
5025 */ 5026 if (other_inode == ERR_PTR(-ENOENT)) { 5027 goto next_key; 5028 } else if (IS_ERR(other_inode)) { 5029 err = PTR_ERR(other_inode); 5030 goto out_unlock; 5031 } 5032 /* 5033 * We are safe logging the other inode without 5034 * acquiring its i_mutex as long as we log with 5035 * the LOG_INODE_EXISTS mode. We're safe against 5036 * concurrent renames of the other inode as well 5037 * because during a rename we pin the log and 5038 * update the log with the new name before we 5039 * unpin it. 5040 */ 5041 err = btrfs_log_inode(trans, root, 5042 BTRFS_I(other_inode), 5043 LOG_OTHER_INODE, 0, LLONG_MAX, 5044 ctx); 5045 iput(other_inode); 5046 if (err) 5047 goto out_unlock; 5048 else 5049 goto next_key; 5050 } 5051 } 5052 5053 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 5054 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 5055 if (ins_nr == 0) 5056 goto next_slot; 5057 ret = copy_items(trans, inode, dst_path, path, 5058 &last_extent, ins_start_slot, 5059 ins_nr, inode_only, logged_isize); 5060 if (ret < 0) { 5061 err = ret; 5062 goto out_unlock; 5063 } 5064 ins_nr = 0; 5065 if (ret) { 5066 btrfs_release_path(path); 5067 continue; 5068 } 5069 goto next_slot; 5070 } 5071 5072 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 5073 ins_nr++; 5074 goto next_slot; 5075 } else if (!ins_nr) { 5076 ins_start_slot = path->slots[0]; 5077 ins_nr = 1; 5078 goto next_slot; 5079 } 5080 5081 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5082 ins_start_slot, ins_nr, inode_only, 5083 logged_isize); 5084 if (ret < 0) { 5085 err = ret; 5086 goto out_unlock; 5087 } 5088 if (ret) { 5089 ins_nr = 0; 5090 btrfs_release_path(path); 5091 continue; 5092 } 5093 ins_nr = 1; 5094 ins_start_slot = path->slots[0]; 5095 next_slot: 5096 5097 nritems = btrfs_header_nritems(path->nodes[0]); 5098 path->slots[0]++; 5099 if (path->slots[0] < nritems) { 5100 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 5101 path->slots[0]); 5102 goto again; 5103 } 5104 if (ins_nr) { 5105 ret = copy_items(trans, inode, dst_path, path, 5106 &last_extent, ins_start_slot, 5107 ins_nr, inode_only, logged_isize); 5108 if (ret < 0) { 5109 err = ret; 5110 goto out_unlock; 5111 } 5112 ret = 0; 5113 ins_nr = 0; 5114 } 5115 btrfs_release_path(path); 5116 next_key: 5117 if (min_key.offset < (u64)-1) { 5118 min_key.offset++; 5119 } else if (min_key.type < max_key.type) { 5120 min_key.type++; 5121 min_key.offset = 0; 5122 } else { 5123 break; 5124 } 5125 } 5126 if (ins_nr) { 5127 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5128 ins_start_slot, ins_nr, inode_only, 5129 logged_isize); 5130 if (ret < 0) { 5131 err = ret; 5132 goto out_unlock; 5133 } 5134 ret = 0; 5135 ins_nr = 0; 5136 } 5137 5138 btrfs_release_path(path); 5139 btrfs_release_path(dst_path); 5140 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 5141 if (err) 5142 goto out_unlock; 5143 xattrs_logged = true; 5144 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 5145 btrfs_release_path(path); 5146 btrfs_release_path(dst_path); 5147 err = btrfs_log_trailing_hole(trans, root, inode, path); 5148 if (err) 5149 goto out_unlock; 5150 } 5151 log_extents: 5152 btrfs_release_path(path); 5153 btrfs_release_path(dst_path); 5154 if (need_log_inode_item) { 5155 err = log_inode_item(trans, log, dst_path, inode); 5156 if (!err && !xattrs_logged) { 5157 err = btrfs_log_all_xattrs(trans, root, inode, path, 5158 dst_path); 5159 btrfs_release_path(path); 5160 } 5161 if (err) 5162 goto out_unlock; 5163 } 5164 if (fast_search) { 
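		/*
		 * Fast fsync path: instead of copying items from the fs/subvol
		 * tree, log only the extent maps in the inode's list of
		 * modified extents (see btrfs_log_changed_extents()).
		 */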
5165 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 5166 ctx, start, end); 5167 if (ret) { 5168 err = ret; 5169 goto out_unlock; 5170 } 5171 } else if (inode_only == LOG_INODE_ALL) { 5172 struct extent_map *em, *n; 5173 5174 write_lock(&em_tree->lock); 5175 /* 5176 * We can't just remove every em if we're called for a ranged 5177 * fsync - that is, one that doesn't cover the whole possible 5178 * file range (0 to LLONG_MAX). This is because we can have 5179 * em's that fall outside the range we're logging and therefore 5180 * their ordered operations haven't completed yet 5181 * (btrfs_finish_ordered_io() not invoked yet). This means we 5182 * didn't get their respective file extent item in the fs/subvol 5183 * tree yet, and need to let the next fast fsync (one which 5184 * consults the list of modified extent maps) find the em so 5185 * that it logs a matching file extent item and waits for the 5186 * respective ordered operation to complete (if it's still 5187 * running). 5188 * 5189 * Removing every em outside the range we're logging would make 5190 * the next fast fsync not log their matching file extent items, 5191 * therefore making us lose data after a log replay. 5192 */ 5193 list_for_each_entry_safe(em, n, &em_tree->modified_extents, 5194 list) { 5195 const u64 mod_end = em->mod_start + em->mod_len - 1; 5196 5197 if (em->mod_start >= start && mod_end <= end) 5198 list_del_init(&em->list); 5199 } 5200 write_unlock(&em_tree->lock); 5201 } 5202 5203 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { 5204 ret = log_directory_changes(trans, root, inode, path, dst_path, 5205 ctx); 5206 if (ret) { 5207 err = ret; 5208 goto out_unlock; 5209 } 5210 } 5211 5212 spin_lock(&inode->lock); 5213 inode->logged_trans = trans->transid; 5214 inode->last_log_commit = inode->last_sub_trans; 5215 spin_unlock(&inode->lock); 5216 out_unlock: 5217 mutex_unlock(&inode->log_mutex); 5218 5219 btrfs_free_path(path); 5220 btrfs_free_path(dst_path); 5221 return err; 5222 } 5223 5224 /* 5225 * Check if we must fallback to a transaction commit when logging an inode. 5226 * This must be called after logging the inode and is used only in the context 5227 * when fsyncing an inode requires the need to log some other inode - in which 5228 * case we can't lock the i_mutex of each other inode we need to log as that 5229 * can lead to deadlocks with concurrent fsync against other inodes (as we can 5230 * log inodes up or down in the hierarchy) or rename operations for example. So 5231 * we take the log_mutex of the inode after we have logged it and then check for 5232 * its last_unlink_trans value - this is safe because any task setting 5233 * last_unlink_trans must take the log_mutex and it must do this before it does 5234 * the actual unlink operation, so if we do this check before a concurrent task 5235 * sets last_unlink_trans it means we've logged a consistent version/state of 5236 * all the inode items, otherwise we are not sure and must do a transaction 5237 * commit (the concurrent task might have only updated last_unlink_trans before 5238 * we logged the inode or it might have also done the unlink). 
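 * A sketch of the pattern used by the callers below (see
 * log_new_dir_dentries() and btrfs_log_all_parents()):
 *
 *   ret = btrfs_log_inode(trans, root, inode, ...);
 *   if (!ret && btrfs_must_commit_transaction(trans, inode))
 *           ret = 1;
 *
 * where a return value of 1 makes the fsync fall back to a full
 * transaction commit.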
5239 */ 5240 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 5241 struct btrfs_inode *inode) 5242 { 5243 struct btrfs_fs_info *fs_info = inode->root->fs_info; 5244 bool ret = false; 5245 5246 mutex_lock(&inode->log_mutex); 5247 if (inode->last_unlink_trans > fs_info->last_trans_committed) { 5248 /* 5249 * Make sure any commits to the log are forced to be full 5250 * commits. 5251 */ 5252 btrfs_set_log_full_commit(fs_info, trans); 5253 ret = true; 5254 } 5255 mutex_unlock(&inode->log_mutex); 5256 5257 return ret; 5258 } 5259 5260 /* 5261 * follow the dentry parent pointers up the chain and see if any 5262 * of the directories in it require a full commit before they can 5263 * be logged. Returns zero if nothing special needs to be done or 1 if 5264 * a full commit is required. 5265 */ 5266 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 5267 struct btrfs_inode *inode, 5268 struct dentry *parent, 5269 struct super_block *sb, 5270 u64 last_committed) 5271 { 5272 int ret = 0; 5273 struct dentry *old_parent = NULL; 5274 struct btrfs_inode *orig_inode = inode; 5275 5276 /* 5277 * for regular files, if its inode is already on disk, we don't 5278 * have to worry about the parents at all. This is because 5279 * we can use the last_unlink_trans field to record renames 5280 * and other fun in this file. 5281 */ 5282 if (S_ISREG(inode->vfs_inode.i_mode) && 5283 inode->generation <= last_committed && 5284 inode->last_unlink_trans <= last_committed) 5285 goto out; 5286 5287 if (!S_ISDIR(inode->vfs_inode.i_mode)) { 5288 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5289 goto out; 5290 inode = BTRFS_I(d_inode(parent)); 5291 } 5292 5293 while (1) { 5294 /* 5295 * If we are logging a directory then we start with our inode, 5296 * not our parent's inode, so we need to skip setting the 5297 * logged_trans so that further down in the log code we don't 5298 * think this inode has already been logged. 5299 */ 5300 if (inode != orig_inode) 5301 inode->logged_trans = trans->transid; 5302 smp_mb(); 5303 5304 if (btrfs_must_commit_transaction(trans, inode)) { 5305 ret = 1; 5306 break; 5307 } 5308 5309 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5310 break; 5311 5312 if (IS_ROOT(parent)) { 5313 inode = BTRFS_I(d_inode(parent)); 5314 if (btrfs_must_commit_transaction(trans, inode)) 5315 ret = 1; 5316 break; 5317 } 5318 5319 parent = dget_parent(parent); 5320 dput(old_parent); 5321 old_parent = parent; 5322 inode = BTRFS_I(d_inode(parent)); 5323 5324 } 5325 dput(old_parent); 5326 out: 5327 return ret; 5328 } 5329 5330 struct btrfs_dir_list { 5331 u64 ino; 5332 struct list_head list; 5333 }; 5334 5335 /* 5336 * Log the inodes of the new dentries of a directory. See log_dir_items() for 5337 * details about the why it is needed. 5338 * This is a recursive operation - if an existing dentry corresponds to a 5339 * directory, that directory's new entries are logged too (same behaviour as 5340 * ext3/4, xfs, f2fs, reiserfs, nilfs2). 
Note that when logging the inodes 5341 * the dentries point to we do not lock their i_mutex, otherwise lockdep 5342 * complains about the following circular lock dependency / possible deadlock: 5343 * 5344 * CPU0 CPU1 5345 * ---- ---- 5346 * lock(&type->i_mutex_dir_key#3/2); 5347 * lock(sb_internal#2); 5348 * lock(&type->i_mutex_dir_key#3/2); 5349 * lock(&sb->s_type->i_mutex_key#14); 5350 * 5351 * Where sb_internal is the lock (a counter that works as a lock) acquired by 5352 * sb_start_intwrite() in btrfs_start_transaction(). 5353 * Not locking i_mutex of the inodes is still safe because: 5354 * 5355 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible 5356 * that while logging the inode new references (names) are added or removed 5357 * from the inode, leaving the logged inode item with a link count that does 5358 * not match the number of logged inode reference items. This is fine because 5359 * at log replay time we compute the real number of links and correct the 5360 * link count in the inode item (see replay_one_buffer() and 5361 * link_to_fixup_dir()); 5362 * 5363 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that 5364 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and 5365 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item 5366 * has a size that doesn't match the sum of the lengths of all the logged 5367 * names. This does not result in a problem because if a dir_item key is 5368 * logged but its matching dir_index key is not logged, at log replay time we 5369 * don't use it to replay the respective name (see replay_one_name()). On the 5370 * other hand if only the dir_index key ends up being logged, the respective 5371 * name is added to the fs/subvol tree with both the dir_item and dir_index 5372 * keys created (see replay_one_name()). 5373 * The directory's inode item with a wrong i_size is not a problem as well, 5374 * since we don't use it at log replay time to set the i_size in the inode 5375 * item of the fs/subvol tree (see overwrite_item()). 
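 * Although the operation is logically recursive, it is implemented below
 * with a list (dir_list): directories found this way that themselves got
 * new dentries are appended to the list and processed in turn, so we
 * never recurse on the kernel stack. For example, an fsync of a new
 * directory A containing new subdirectories B and C ends up logging A,
 * B, C and any new directories created inside them.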
5376 */ 5377 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5378 struct btrfs_root *root, 5379 struct btrfs_inode *start_inode, 5380 struct btrfs_log_ctx *ctx) 5381 { 5382 struct btrfs_fs_info *fs_info = root->fs_info; 5383 struct btrfs_root *log = root->log_root; 5384 struct btrfs_path *path; 5385 LIST_HEAD(dir_list); 5386 struct btrfs_dir_list *dir_elem; 5387 int ret = 0; 5388 5389 path = btrfs_alloc_path(); 5390 if (!path) 5391 return -ENOMEM; 5392 5393 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5394 if (!dir_elem) { 5395 btrfs_free_path(path); 5396 return -ENOMEM; 5397 } 5398 dir_elem->ino = btrfs_ino(start_inode); 5399 list_add_tail(&dir_elem->list, &dir_list); 5400 5401 while (!list_empty(&dir_list)) { 5402 struct extent_buffer *leaf; 5403 struct btrfs_key min_key; 5404 int nritems; 5405 int i; 5406 5407 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, 5408 list); 5409 if (ret) 5410 goto next_dir_inode; 5411 5412 min_key.objectid = dir_elem->ino; 5413 min_key.type = BTRFS_DIR_ITEM_KEY; 5414 min_key.offset = 0; 5415 again: 5416 btrfs_release_path(path); 5417 ret = btrfs_search_forward(log, &min_key, path, trans->transid); 5418 if (ret < 0) { 5419 goto next_dir_inode; 5420 } else if (ret > 0) { 5421 ret = 0; 5422 goto next_dir_inode; 5423 } 5424 5425 process_leaf: 5426 leaf = path->nodes[0]; 5427 nritems = btrfs_header_nritems(leaf); 5428 for (i = path->slots[0]; i < nritems; i++) { 5429 struct btrfs_dir_item *di; 5430 struct btrfs_key di_key; 5431 struct inode *di_inode; 5432 struct btrfs_dir_list *new_dir_elem; 5433 int log_mode = LOG_INODE_EXISTS; 5434 int type; 5435 5436 btrfs_item_key_to_cpu(leaf, &min_key, i); 5437 if (min_key.objectid != dir_elem->ino || 5438 min_key.type != BTRFS_DIR_ITEM_KEY) 5439 goto next_dir_inode; 5440 5441 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); 5442 type = btrfs_dir_type(leaf, di); 5443 if (btrfs_dir_transid(leaf, di) < trans->transid && 5444 type != BTRFS_FT_DIR) 5445 continue; 5446 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5447 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5448 continue; 5449 5450 btrfs_release_path(path); 5451 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL); 5452 if (IS_ERR(di_inode)) { 5453 ret = PTR_ERR(di_inode); 5454 goto next_dir_inode; 5455 } 5456 5457 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { 5458 iput(di_inode); 5459 break; 5460 } 5461 5462 ctx->log_new_dentries = false; 5463 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 5464 log_mode = LOG_INODE_ALL; 5465 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), 5466 log_mode, 0, LLONG_MAX, ctx); 5467 if (!ret && 5468 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) 5469 ret = 1; 5470 iput(di_inode); 5471 if (ret) 5472 goto next_dir_inode; 5473 if (ctx->log_new_dentries) { 5474 new_dir_elem = kmalloc(sizeof(*new_dir_elem), 5475 GFP_NOFS); 5476 if (!new_dir_elem) { 5477 ret = -ENOMEM; 5478 goto next_dir_inode; 5479 } 5480 new_dir_elem->ino = di_key.objectid; 5481 list_add_tail(&new_dir_elem->list, &dir_list); 5482 } 5483 break; 5484 } 5485 if (i == nritems) { 5486 ret = btrfs_next_leaf(log, path); 5487 if (ret < 0) { 5488 goto next_dir_inode; 5489 } else if (ret > 0) { 5490 ret = 0; 5491 goto next_dir_inode; 5492 } 5493 goto process_leaf; 5494 } 5495 if (min_key.offset < (u64)-1) { 5496 min_key.offset++; 5497 goto again; 5498 } 5499 next_dir_inode: 5500 list_del(&dir_elem->list); 5501 kfree(dir_elem); 5502 } 5503 5504 btrfs_free_path(path); 5505 return ret; 5506 } 5507 5508 static int 
btrfs_log_all_parents(struct btrfs_trans_handle *trans, 5509 struct btrfs_inode *inode, 5510 struct btrfs_log_ctx *ctx) 5511 { 5512 struct btrfs_fs_info *fs_info = trans->fs_info; 5513 int ret; 5514 struct btrfs_path *path; 5515 struct btrfs_key key; 5516 struct btrfs_root *root = inode->root; 5517 const u64 ino = btrfs_ino(inode); 5518 5519 path = btrfs_alloc_path(); 5520 if (!path) 5521 return -ENOMEM; 5522 path->skip_locking = 1; 5523 path->search_commit_root = 1; 5524 5525 key.objectid = ino; 5526 key.type = BTRFS_INODE_REF_KEY; 5527 key.offset = 0; 5528 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5529 if (ret < 0) 5530 goto out; 5531 5532 while (true) { 5533 struct extent_buffer *leaf = path->nodes[0]; 5534 int slot = path->slots[0]; 5535 u32 cur_offset = 0; 5536 u32 item_size; 5537 unsigned long ptr; 5538 5539 if (slot >= btrfs_header_nritems(leaf)) { 5540 ret = btrfs_next_leaf(root, path); 5541 if (ret < 0) 5542 goto out; 5543 else if (ret > 0) 5544 break; 5545 continue; 5546 } 5547 5548 btrfs_item_key_to_cpu(leaf, &key, slot); 5549 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 5550 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 5551 break; 5552 5553 item_size = btrfs_item_size_nr(leaf, slot); 5554 ptr = btrfs_item_ptr_offset(leaf, slot); 5555 while (cur_offset < item_size) { 5556 struct btrfs_key inode_key; 5557 struct inode *dir_inode; 5558 5559 inode_key.type = BTRFS_INODE_ITEM_KEY; 5560 inode_key.offset = 0; 5561 5562 if (key.type == BTRFS_INODE_EXTREF_KEY) { 5563 struct btrfs_inode_extref *extref; 5564 5565 extref = (struct btrfs_inode_extref *) 5566 (ptr + cur_offset); 5567 inode_key.objectid = btrfs_inode_extref_parent( 5568 leaf, extref); 5569 cur_offset += sizeof(*extref); 5570 cur_offset += btrfs_inode_extref_name_len(leaf, 5571 extref); 5572 } else { 5573 inode_key.objectid = key.offset; 5574 cur_offset = item_size; 5575 } 5576 5577 dir_inode = btrfs_iget(fs_info->sb, &inode_key, 5578 root, NULL); 5579 /* 5580 * If the parent inode was deleted, return an error to 5581 * fallback to a transaction commit. This is to prevent 5582 * getting an inode that was moved from one parent A to 5583 * a parent B, got its former parent A deleted and then 5584 * it got fsync'ed, from existing at both parents after 5585 * a log replay (and the old parent still existing). 5586 * Example: 5587 * 5588 * mkdir /mnt/A 5589 * mkdir /mnt/B 5590 * touch /mnt/B/bar 5591 * sync 5592 * mv /mnt/B/bar /mnt/A/bar 5593 * mv -T /mnt/A /mnt/B 5594 * fsync /mnt/B/bar 5595 * <power fail> 5596 * 5597 * If we ignore the old parent B which got deleted, 5598 * after a log replay we would have file bar linked 5599 * at both parents and the old parent B would still 5600 * exist. 5601 */ 5602 if (IS_ERR(dir_inode)) { 5603 ret = PTR_ERR(dir_inode); 5604 goto out; 5605 } 5606 5607 if (ctx) 5608 ctx->log_new_dentries = false; 5609 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode), 5610 LOG_INODE_ALL, 0, LLONG_MAX, ctx); 5611 if (!ret && 5612 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode))) 5613 ret = 1; 5614 if (!ret && ctx && ctx->log_new_dentries) 5615 ret = log_new_dir_dentries(trans, root, 5616 BTRFS_I(dir_inode), ctx); 5617 iput(dir_inode); 5618 if (ret) 5619 goto out; 5620 } 5621 path->slots[0]++; 5622 } 5623 ret = 0; 5624 out: 5625 btrfs_free_path(path); 5626 return ret; 5627 } 5628 5629 /* 5630 * helper function around btrfs_log_inode to make sure newly created 5631 * parent directories also end up in the log. 
A minimal inode and backref 5632 * only logging is done of any parent directories that are older than 5633 * the last committed transaction 5634 */ 5635 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 5636 struct btrfs_inode *inode, 5637 struct dentry *parent, 5638 const loff_t start, 5639 const loff_t end, 5640 int inode_only, 5641 struct btrfs_log_ctx *ctx) 5642 { 5643 struct btrfs_root *root = inode->root; 5644 struct btrfs_fs_info *fs_info = root->fs_info; 5645 struct super_block *sb; 5646 struct dentry *old_parent = NULL; 5647 int ret = 0; 5648 u64 last_committed = fs_info->last_trans_committed; 5649 bool log_dentries = false; 5650 struct btrfs_inode *orig_inode = inode; 5651 5652 sb = inode->vfs_inode.i_sb; 5653 5654 if (btrfs_test_opt(fs_info, NOTREELOG)) { 5655 ret = 1; 5656 goto end_no_trans; 5657 } 5658 5659 /* 5660 * The prev transaction commit doesn't complete, we need do 5661 * full commit by ourselves. 5662 */ 5663 if (fs_info->last_trans_log_full_commit > 5664 fs_info->last_trans_committed) { 5665 ret = 1; 5666 goto end_no_trans; 5667 } 5668 5669 if (btrfs_root_refs(&root->root_item) == 0) { 5670 ret = 1; 5671 goto end_no_trans; 5672 } 5673 5674 ret = check_parent_dirs_for_sync(trans, inode, parent, sb, 5675 last_committed); 5676 if (ret) 5677 goto end_no_trans; 5678 5679 /* 5680 * Skip already logged inodes or inodes corresponding to tmpfiles 5681 * (since logging them is pointless, a link count of 0 means they 5682 * will never be accessible). 5683 */ 5684 if (btrfs_inode_in_log(inode, trans->transid) || 5685 inode->vfs_inode.i_nlink == 0) { 5686 ret = BTRFS_NO_LOG_SYNC; 5687 goto end_no_trans; 5688 } 5689 5690 ret = start_log_trans(trans, root, ctx); 5691 if (ret) 5692 goto end_no_trans; 5693 5694 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); 5695 if (ret) 5696 goto end_trans; 5697 5698 /* 5699 * for regular files, if its inode is already on disk, we don't 5700 * have to worry about the parents at all. This is because 5701 * we can use the last_unlink_trans field to record renames 5702 * and other fun in this file. 5703 */ 5704 if (S_ISREG(inode->vfs_inode.i_mode) && 5705 inode->generation <= last_committed && 5706 inode->last_unlink_trans <= last_committed) { 5707 ret = 0; 5708 goto end_trans; 5709 } 5710 5711 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries) 5712 log_dentries = true; 5713 5714 /* 5715 * On unlink we must make sure all our current and old parent directory 5716 * inodes are fully logged. This is to prevent leaving dangling 5717 * directory index entries in directories that were our parents but are 5718 * not anymore. Not doing this results in old parent directory being 5719 * impossible to delete after log replay (rmdir will always fail with 5720 * error -ENOTEMPTY). 5721 * 5722 * Example 1: 5723 * 5724 * mkdir testdir 5725 * touch testdir/foo 5726 * ln testdir/foo testdir/bar 5727 * sync 5728 * unlink testdir/bar 5729 * xfs_io -c fsync testdir/foo 5730 * <power failure> 5731 * mount fs, triggers log replay 5732 * 5733 * If we don't log the parent directory (testdir), after log replay the 5734 * directory still has an entry pointing to the file inode using the bar 5735 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and 5736 * the file inode has a link count of 1. 
5737 * 5738 * Example 2: 5739 * 5740 * mkdir testdir 5741 * touch foo 5742 * ln foo testdir/foo2 5743 * ln foo testdir/foo3 5744 * sync 5745 * unlink testdir/foo3 5746 * xfs_io -c fsync foo 5747 * <power failure> 5748 * mount fs, triggers log replay 5749 * 5750 * Similar as the first example, after log replay the parent directory 5751 * testdir still has an entry pointing to the inode file with name foo3 5752 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item 5753 * and has a link count of 2. 5754 */ 5755 if (inode->last_unlink_trans > last_committed) { 5756 ret = btrfs_log_all_parents(trans, orig_inode, ctx); 5757 if (ret) 5758 goto end_trans; 5759 } 5760 5761 /* 5762 * If a new hard link was added to the inode in the current transaction 5763 * and its link count is now greater than 1, we need to fallback to a 5764 * transaction commit, otherwise we can end up not logging all its new 5765 * parents for all the hard links. Here just from the dentry used to 5766 * fsync, we can not visit the ancestor inodes for all the other hard 5767 * links to figure out if any is new, so we fallback to a transaction 5768 * commit (instead of adding a lot of complexity of scanning a btree, 5769 * since this scenario is not a common use case). 5770 */ 5771 if (inode->vfs_inode.i_nlink > 1 && 5772 inode->last_link_trans > last_committed) { 5773 ret = -EMLINK; 5774 goto end_trans; 5775 } 5776 5777 while (1) { 5778 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5779 break; 5780 5781 inode = BTRFS_I(d_inode(parent)); 5782 if (root != inode->root) 5783 break; 5784 5785 if (inode->generation > last_committed) { 5786 ret = btrfs_log_inode(trans, root, inode, 5787 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx); 5788 if (ret) 5789 goto end_trans; 5790 } 5791 if (IS_ROOT(parent)) 5792 break; 5793 5794 parent = dget_parent(parent); 5795 dput(old_parent); 5796 old_parent = parent; 5797 } 5798 if (log_dentries) 5799 ret = log_new_dir_dentries(trans, root, orig_inode, ctx); 5800 else 5801 ret = 0; 5802 end_trans: 5803 dput(old_parent); 5804 if (ret < 0) { 5805 btrfs_set_log_full_commit(fs_info, trans); 5806 ret = 1; 5807 } 5808 5809 if (ret) 5810 btrfs_remove_log_ctx(root, ctx); 5811 btrfs_end_log_trans(root); 5812 end_no_trans: 5813 return ret; 5814 } 5815 5816 /* 5817 * it is not safe to log dentry if the chunk root has added new 5818 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 5819 * If this returns 1, you must commit the transaction to safely get your 5820 * data on disk. 
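 * A rough sketch of how the fsync path is expected to use this, assuming
 * the caller is btrfs_sync_file() (the real code there has more error
 * handling):
 *
 *   ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *   if (ret == 0)
 *           ret = btrfs_sync_log(trans, root, &ctx);
 *   else if (ret != BTRFS_NO_LOG_SYNC)
 *           ret = btrfs_commit_transaction(trans);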

/*
 * It is not safe to log a dentry if the chunk root has added new chunks.
 * This returns 0 if the dentry was logged, and 1 otherwise.  If this
 * returns 1, you must commit the transaction to safely get your data on
 * disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     start, end, LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}
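
/*
 * A simplified sketch, loosely modeled on the shape of btrfs_sync_file(), of
 * how an fsync-style caller is expected to consume the return contract above
 * (error handling and the locking/ordering details of the real fsync path
 * are omitted; root, trans and ctx are assumed to be set up by the caller):
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *	if (ret == BTRFS_NO_LOG_SYNC) {
 *		ret = btrfs_end_transaction(trans);	(nothing to sync)
 *	} else if (ret) {
 *		ret = btrfs_commit_transaction(trans);	(full commit needed)
 *	} else {
 *		ret = btrfs_sync_log(trans, root, &ctx);
 *		if (!ret)
 *			ret = btrfs_end_transaction(trans);
 *		else
 *			ret = btrfs_commit_transaction(trans);
 *	}
 */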

/*
 * Should be called during mount to recover and replay any log trees from the
 * FS.
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of the fs roots has probably changed in
			 * case some inode items got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * can only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}
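
/*
 * For context, a rough sketch of the mount-time entry point, assuming the
 * caller shape of btrfs_replay_log()/open_ctree() in disk-io.c (simplified,
 * error handling omitted): recovery runs only when the super block records a
 * log root, before the filesystem is exposed to user space:
 *
 *	if (btrfs_super_log_root(disk_super) != 0) {
 *		... read the log root tree from the super block location ...
 *		ret = btrfs_recover_log_trees(log_tree_root);
 *	}
 */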

/*
 * There are some corner cases where we want to force a full commit instead
 * of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and this
 * function updates the parent directory so that a full commit is properly
 * done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * When we're logging a file, if it hasn't been renamed or unlinked,
	 * and its inode is fully committed on disk, we don't have to worry
	 * about walking up the directory chain to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid into the
	 * file.  When the file is logged we check it and don't log the
	 * parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * If this directory was already logged, any new names for this
	 * file/dir will get recorded.
	 */
	smp_mb();
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * If the inode we're about to unlink was logged, the log will be
	 * properly updated for any new names.
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * When renaming files across directories, if the directory we're
	 * unlinking from gets fsync'd later on, there's no way to find the
	 * destination directory later and fsync it properly.  So, we have to
	 * be conservative and force commits so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Make sure that if someone attempts to fsync the parent directory of a
 * deleted snapshot, it ends up triggering a transaction commit.  This is to
 * guarantee that after replaying the log tree of the parent directory's root
 * we will not see the snapshot anymore and at log replay time we will not
 * see any log tree corresponding to the deleted snapshot's root, which could
 * lead to replaying it after replaying the log tree of the parent directory
 * (which would replay the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to
 * the parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
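
/*
 * Sketch of the expected call sites for the two record helpers above
 * (simplified from the shape of btrfs_unlink() in inode.c and the snapshot
 * destroy ioctl; error handling omitted).  The recording must happen inside
 * the running transaction, before the directory entries are actually
 * removed:
 *
 *	trans = btrfs_start_transaction(root, ...);
 *	btrfs_record_unlink_dir(trans, BTRFS_I(dir),
 *				BTRFS_I(d_inode(dentry)), 0);
 *	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
 *				 BTRFS_I(d_inode(dentry)),
 *				 dentry->d_name.name, dentry->d_name.len);
 *
 * and, before destroying a snapshot:
 *
 *	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
 */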

/*
 * Call this after adding a new name for a file and it will properly update
 * the log to reflect the new name.
 *
 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
 * true (because it's not used).
 *
 * The return value depends on whether @sync_log is true or false.
 * When true:  returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
 *             otherwise.
 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need
 *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the
 *             log, or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed (without attempting to sync the log).
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode, struct btrfs_inode *old_dir,
		       struct dentry *parent,
		       bool sync_log, struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	/*
	 * This will force the logging code to walk the dentry chain up for
	 * the file.
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * If this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged either, we don't need to log it.
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
			BTRFS_DONT_NEED_LOG_SYNC;

	if (sync_log) {
		struct btrfs_log_ctx ctx2;

		btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
		ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
					     LOG_INODE_EXISTS, &ctx2);
		if (ret == BTRFS_NO_LOG_SYNC)
			return BTRFS_DONT_NEED_TRANS_COMMIT;
		else if (ret)
			return BTRFS_NEED_TRANS_COMMIT;

		ret = btrfs_sync_log(trans, inode->root, &ctx2);
		if (ret)
			return BTRFS_NEED_TRANS_COMMIT;
		return BTRFS_DONT_NEED_TRANS_COMMIT;
	}

	ASSERT(ctx);
	ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
				     LOG_INODE_EXISTS, ctx);
	if (ret == BTRFS_NO_LOG_SYNC)
		return BTRFS_DONT_NEED_LOG_SYNC;
	else if (ret)
		return BTRFS_NEED_TRANS_COMMIT;

	return BTRFS_NEED_LOG_SYNC;
}
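
/*
 * Sketch of how a rename-style caller is expected to consume the return
 * contract above, loosely following the shape of btrfs_rename() in inode.c
 * (simplified, with error handling and the log-pinning details omitted;
 * sync_log and commit_transaction are assumed local booleans of the caller):
 *
 *	ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
 *				 parent, false, &ctx);
 *	if (ret == BTRFS_NEED_LOG_SYNC)
 *		sync_log = true;
 *	else if (ret == BTRFS_NEED_TRANS_COMMIT)
 *		commit_transaction = true;
 *	...
 *	if (sync_log)
 *		ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
 *	if (commit_transaction)
 *		ret = btrfs_commit_transaction(trans);
 *	else
 *		ret = btrfs_end_transaction(trans);
 */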