/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
#include "compression.h"
#include "qgroup.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
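/*
 * Illustrative sequence for case 2a (a hypothetical session, not from the
 * original comment):
 *
 *	mkdir dir1 ; touch dir1/file
 *	fsync(dir1)                     <- dir1 is now in the fsync log
 *	mv dir1/file dir2/file
 *	fsync(dir2/file)
 *	<crash>
 *
 * Because dir1 (the directory the name was removed from) was logged, the
 * rename must also log the new name under dir2; otherwise replay could
 * drop dir1/file without recreating dir2/file.
 */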
89 * 90 * The last stage is to deal with directories and links and extents 91 * and all the other fun semantics 92 */ 93 #define LOG_WALK_PIN_ONLY 0 94 #define LOG_WALK_REPLAY_INODES 1 95 #define LOG_WALK_REPLAY_DIR_INDEX 2 96 #define LOG_WALK_REPLAY_ALL 3 97 98 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 99 struct btrfs_root *root, struct inode *inode, 100 int inode_only, 101 const loff_t start, 102 const loff_t end, 103 struct btrfs_log_ctx *ctx); 104 static int link_to_fixup_dir(struct btrfs_trans_handle *trans, 105 struct btrfs_root *root, 106 struct btrfs_path *path, u64 objectid); 107 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 108 struct btrfs_root *root, 109 struct btrfs_root *log, 110 struct btrfs_path *path, 111 u64 dirid, int del_all); 112 113 /* 114 * tree logging is a special write ahead log used to make sure that 115 * fsyncs and O_SYNCs can happen without doing full tree commits. 116 * 117 * Full tree commits are expensive because they require commonly 118 * modified blocks to be recowed, creating many dirty pages in the 119 * extent tree an 4x-6x higher write load than ext3. 120 * 121 * Instead of doing a tree commit on every fsync, we use the 122 * key ranges and transaction ids to find items for a given file or directory 123 * that have changed in this transaction. Those items are copied into 124 * a special tree (one per subvolume root), that tree is written to disk 125 * and then the fsync is considered complete. 126 * 127 * After a crash, items are copied out of the log-tree back into the 128 * subvolume tree. Any file data extents found are recorded in the extent 129 * allocation tree, and the log-tree freed. 130 * 131 * The log tree is read three times, once to pin down all the extents it is 132 * using in ram and once, once to create all the inodes logged in the tree 133 * and once to do all the other items. 
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&root->fs_info->tree_log_mutex);
		if (!root->fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, root->fs_info);
		mutex_unlock(&root->fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans().
 * Pinning always succeeds, so this returns 0.
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return 0;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
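/*
 * A minimal usage sketch (an assumed caller, not a verbatim one): writers
 * bracket their log-tree updates so that a concurrent log commit waits
 * for them:
 *
 *	ret = start_log_trans(trans, root, ctx);
 *	if (ret)
 *		return ret;       <- -EAGAIN means fall back to full commit
 *	...insert items into root->log_root...
 *	btrfs_end_log_trans(root);
 */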

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
						      eb->start, eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
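/*
 * Concretely (illustrative numbers): if the destination tree already holds
 * the key with an 80 byte item while the log copy is 100 bytes, the
 * function below extends the item by 20 bytes before copying; a
 * 100 -> 80 byte mismatch is truncated instead, and a byte-identical item
 * is left untouched to avoid needless COW.
 */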
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(root, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				if (!ret)
					ret = btrfs_del_csums(trans,
						      root->fs_info->csum_root,
						      sums->bytenr,
						      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						      root->fs_info->csum_root,
						      sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, root);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
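/*
 * Aside (general btrfs layout, summarized here for context): each name in
 * a directory is stored twice, as a (dirid, BTRFS_DIR_INDEX_KEY, sequence)
 * item and as a (dirid, BTRFS_DIR_ITEM_KEY, name hash) item, which is why
 * inode_in_dir() above only reports a match when both lookups point at
 * the expected inode.
 */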
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, root);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
								 victim_parent,
								 inode,
								 victim_name,
								 victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								trans, root);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
			if (ret)
				return ret;
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
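/*
 * For reference, the two on-disk reference layouts parsed by the helpers
 * above (summarized from the btrfs format; several entries may be packed
 * back to back inside a single item):
 *
 *   BTRFS_INODE_REF_KEY    (key offset = parent dir objectid)
 *	struct btrfs_inode_ref { index; name_len; } followed by the name
 *
 *   BTRFS_INODE_EXTREF_KEY (key offset = hash of parent objectid + name)
 *	struct btrfs_inode_extref { parent_objectid; index; name_len; }
 *	followed by the name
 *
 * This is why callers advance a ref_ptr by the struct size plus namelen.
 */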
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
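/*
 * Example (hypothetical values): if, after all log items are replayed, the
 * subvolume holds back refs (ino 258, parent 256, "f1") and
 * (ino 258, parent 256, "f2"), the scan below counts two names and sets
 * i_nlink on inode 258 to 2, regardless of what link count the logged
 * inode item carried.
 */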
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}


/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
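/*
 * Illustration: the fixup entries created above are keyed
 * (BTRFS_TREE_LOG_FIXUP_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, inode number),
 * so fixup_inode_link_counts() can sweep them all with one backwards
 * search from offset (u64)-1 once the rest of replay is done.
 */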
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(dir, dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
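/*
 * Worked example (hypothetical values): suppose the log holds a dir_log
 * item for dirid 257 with key offset 0 and btrfs_dir_log_end() == 99.
 * The log is then authoritative for index keys (257, 0..99): any
 * directory item in that range found in the subvolume but absent from
 * the log was deleted before the fsync and must be removed by replay.
 */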
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * this looks for a given directory item in the log.  If the directory
If the directory 1970 * item is not in the log, the item is removed and the inode it points 1971 * to is unlinked 1972 */ 1973 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 1974 struct btrfs_root *root, 1975 struct btrfs_root *log, 1976 struct btrfs_path *path, 1977 struct btrfs_path *log_path, 1978 struct inode *dir, 1979 struct btrfs_key *dir_key) 1980 { 1981 int ret; 1982 struct extent_buffer *eb; 1983 int slot; 1984 u32 item_size; 1985 struct btrfs_dir_item *di; 1986 struct btrfs_dir_item *log_di; 1987 int name_len; 1988 unsigned long ptr; 1989 unsigned long ptr_end; 1990 char *name; 1991 struct inode *inode; 1992 struct btrfs_key location; 1993 1994 again: 1995 eb = path->nodes[0]; 1996 slot = path->slots[0]; 1997 item_size = btrfs_item_size_nr(eb, slot); 1998 ptr = btrfs_item_ptr_offset(eb, slot); 1999 ptr_end = ptr + item_size; 2000 while (ptr < ptr_end) { 2001 di = (struct btrfs_dir_item *)ptr; 2002 if (verify_dir_item(root, eb, di)) { 2003 ret = -EIO; 2004 goto out; 2005 } 2006 2007 name_len = btrfs_dir_name_len(eb, di); 2008 name = kmalloc(name_len, GFP_NOFS); 2009 if (!name) { 2010 ret = -ENOMEM; 2011 goto out; 2012 } 2013 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2014 name_len); 2015 log_di = NULL; 2016 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2017 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2018 dir_key->objectid, 2019 name, name_len, 0); 2020 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2021 log_di = btrfs_lookup_dir_index_item(trans, log, 2022 log_path, 2023 dir_key->objectid, 2024 dir_key->offset, 2025 name, name_len, 0); 2026 } 2027 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) { 2028 btrfs_dir_item_key_to_cpu(eb, di, &location); 2029 btrfs_release_path(path); 2030 btrfs_release_path(log_path); 2031 inode = read_one_inode(root, location.objectid); 2032 if (!inode) { 2033 kfree(name); 2034 return -EIO; 2035 } 2036 2037 ret = link_to_fixup_dir(trans, root, 2038 path, location.objectid); 2039 if (ret) { 2040 kfree(name); 2041 iput(inode); 2042 goto out; 2043 } 2044 2045 inc_nlink(inode); 2046 ret = btrfs_unlink_inode(trans, root, dir, inode, 2047 name, name_len); 2048 if (!ret) 2049 ret = btrfs_run_delayed_items(trans, root); 2050 kfree(name); 2051 iput(inode); 2052 if (ret) 2053 goto out; 2054 2055 /* there might still be more names under this key 2056 * check and repeat if required 2057 */ 2058 ret = btrfs_search_slot(NULL, root, dir_key, path, 2059 0, 0); 2060 if (ret == 0) 2061 goto again; 2062 ret = 0; 2063 goto out; 2064 } else if (IS_ERR(log_di)) { 2065 kfree(name); 2066 return PTR_ERR(log_di); 2067 } 2068 btrfs_release_path(log_path); 2069 kfree(name); 2070 2071 ptr = (unsigned long)(di + 1); 2072 ptr += name_len; 2073 } 2074 ret = 0; 2075 out: 2076 btrfs_release_path(path); 2077 btrfs_release_path(log_path); 2078 return ret; 2079 } 2080 2081 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2082 struct btrfs_root *root, 2083 struct btrfs_root *log, 2084 struct btrfs_path *path, 2085 const u64 ino) 2086 { 2087 struct btrfs_key search_key; 2088 struct btrfs_path *log_path; 2089 int i; 2090 int nritems; 2091 int ret; 2092 2093 log_path = btrfs_alloc_path(); 2094 if (!log_path) 2095 return -ENOMEM; 2096 2097 search_key.objectid = ino; 2098 search_key.type = BTRFS_XATTR_ITEM_KEY; 2099 search_key.offset = 0; 2100 again: 2101 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2102 if (ret < 0) 2103 goto out; 2104 process_leaf: 2105 nritems = 
btrfs_header_nritems(path->nodes[0]); 2106 for (i = path->slots[0]; i < nritems; i++) { 2107 struct btrfs_key key; 2108 struct btrfs_dir_item *di; 2109 struct btrfs_dir_item *log_di; 2110 u32 total_size; 2111 u32 cur; 2112 2113 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2114 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2115 ret = 0; 2116 goto out; 2117 } 2118 2119 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2120 total_size = btrfs_item_size_nr(path->nodes[0], i); 2121 cur = 0; 2122 while (cur < total_size) { 2123 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2124 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2125 u32 this_len = sizeof(*di) + name_len + data_len; 2126 char *name; 2127 2128 name = kmalloc(name_len, GFP_NOFS); 2129 if (!name) { 2130 ret = -ENOMEM; 2131 goto out; 2132 } 2133 read_extent_buffer(path->nodes[0], name, 2134 (unsigned long)(di + 1), name_len); 2135 2136 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2137 name, name_len, 0); 2138 btrfs_release_path(log_path); 2139 if (!log_di) { 2140 /* Doesn't exist in log tree, so delete it. */ 2141 btrfs_release_path(path); 2142 di = btrfs_lookup_xattr(trans, root, path, ino, 2143 name, name_len, -1); 2144 kfree(name); 2145 if (IS_ERR(di)) { 2146 ret = PTR_ERR(di); 2147 goto out; 2148 } 2149 ASSERT(di); 2150 ret = btrfs_delete_one_dir_name(trans, root, 2151 path, di); 2152 if (ret) 2153 goto out; 2154 btrfs_release_path(path); 2155 search_key = key; 2156 goto again; 2157 } 2158 kfree(name); 2159 if (IS_ERR(log_di)) { 2160 ret = PTR_ERR(log_di); 2161 goto out; 2162 } 2163 cur += this_len; 2164 di = (struct btrfs_dir_item *)((char *)di + this_len); 2165 } 2166 } 2167 ret = btrfs_next_leaf(root, path); 2168 if (ret > 0) 2169 ret = 0; 2170 else if (ret == 0) 2171 goto process_leaf; 2172 out: 2173 btrfs_free_path(log_path); 2174 btrfs_release_path(path); 2175 return ret; 2176 } 2177 2178 2179 /* 2180 * deletion replay happens before we copy any new directory items 2181 * out of the log or out of backreferences from inodes. It 2182 * scans the log to find ranges of keys that log is authoritative for, 2183 * and then scans the directory to find items in those ranges that are 2184 * not present in the log. 2185 * 2186 * Anything we don't find in the log is unlinked and removed from the 2187 * directory. 
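 *
 * A minimal hypothetical sequence that exercises this path:
 *
 *   mkdir dir ; touch dir/a dir/b ; sync
 *   rm dir/b
 *   xfs_io -c "fsync" dir
 *   <power failure, mount, log replay>
 *
 * The logged range covering dir/b's key says the log is authoritative
 * there, but the name is absent from the log, so replay unlinks dir/b
 * from the subvolume directory.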
2188 */ 2189 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2190 struct btrfs_root *root, 2191 struct btrfs_root *log, 2192 struct btrfs_path *path, 2193 u64 dirid, int del_all) 2194 { 2195 u64 range_start; 2196 u64 range_end; 2197 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2198 int ret = 0; 2199 struct btrfs_key dir_key; 2200 struct btrfs_key found_key; 2201 struct btrfs_path *log_path; 2202 struct inode *dir; 2203 2204 dir_key.objectid = dirid; 2205 dir_key.type = BTRFS_DIR_ITEM_KEY; 2206 log_path = btrfs_alloc_path(); 2207 if (!log_path) 2208 return -ENOMEM; 2209 2210 dir = read_one_inode(root, dirid); 2211 /* it isn't an error if the inode isn't there, that can happen 2212 * because we replay the deletes before we copy in the inode item 2213 * from the log 2214 */ 2215 if (!dir) { 2216 btrfs_free_path(log_path); 2217 return 0; 2218 } 2219 again: 2220 range_start = 0; 2221 range_end = 0; 2222 while (1) { 2223 if (del_all) 2224 range_end = (u64)-1; 2225 else { 2226 ret = find_dir_range(log, path, dirid, key_type, 2227 &range_start, &range_end); 2228 if (ret != 0) 2229 break; 2230 } 2231 2232 dir_key.offset = range_start; 2233 while (1) { 2234 int nritems; 2235 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2236 0, 0); 2237 if (ret < 0) 2238 goto out; 2239 2240 nritems = btrfs_header_nritems(path->nodes[0]); 2241 if (path->slots[0] >= nritems) { 2242 ret = btrfs_next_leaf(root, path); 2243 if (ret) 2244 break; 2245 } 2246 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2247 path->slots[0]); 2248 if (found_key.objectid != dirid || 2249 found_key.type != dir_key.type) 2250 goto next_type; 2251 2252 if (found_key.offset > range_end) 2253 break; 2254 2255 ret = check_item_in_log(trans, root, log, path, 2256 log_path, dir, 2257 &found_key); 2258 if (ret) 2259 goto out; 2260 if (found_key.offset == (u64)-1) 2261 break; 2262 dir_key.offset = found_key.offset + 1; 2263 } 2264 btrfs_release_path(path); 2265 if (range_end == (u64)-1) 2266 break; 2267 range_start = range_end + 1; 2268 } 2269 2270 next_type: 2271 ret = 0; 2272 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2273 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2274 dir_key.type = BTRFS_DIR_INDEX_KEY; 2275 btrfs_release_path(path); 2276 goto again; 2277 } 2278 out: 2279 btrfs_release_path(path); 2280 btrfs_free_path(log_path); 2281 iput(dir); 2282 return ret; 2283 } 2284 2285 /* 2286 * the process_func used to replay items from the log tree. This 2287 * gets called in two different stages. The first stage just looks 2288 * for inodes and makes sure they are all copied into the subvolume. 2289 * 2290 * The second stage copies all the other item types from the log into 2291 * the subvolume. The two stage approach is slower, but gets rid of 2292 * lots of complexity around inodes referencing other inodes that exist 2293 * only in the log (references come from either directory items or inode 2294 * back refs). 
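 *
 * The stages map onto the walk_control stage values:
 * LOG_WALK_REPLAY_INODES copies the inode items (and replays xattr and
 * directory deletes), LOG_WALK_REPLAY_DIR_INDEX replays the
 * BTRFS_DIR_INDEX_KEY items, and LOG_WALK_REPLAY_ALL copies everything
 * else (inode refs, file extents and dir items).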
2295 */ 2296 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2297 struct walk_control *wc, u64 gen) 2298 { 2299 int nritems; 2300 struct btrfs_path *path; 2301 struct btrfs_root *root = wc->replay_dest; 2302 struct btrfs_key key; 2303 int level; 2304 int i; 2305 int ret; 2306 2307 ret = btrfs_read_buffer(eb, gen); 2308 if (ret) 2309 return ret; 2310 2311 level = btrfs_header_level(eb); 2312 2313 if (level != 0) 2314 return 0; 2315 2316 path = btrfs_alloc_path(); 2317 if (!path) 2318 return -ENOMEM; 2319 2320 nritems = btrfs_header_nritems(eb); 2321 for (i = 0; i < nritems; i++) { 2322 btrfs_item_key_to_cpu(eb, &key, i); 2323 2324 /* inode keys are done during the first stage */ 2325 if (key.type == BTRFS_INODE_ITEM_KEY && 2326 wc->stage == LOG_WALK_REPLAY_INODES) { 2327 struct btrfs_inode_item *inode_item; 2328 u32 mode; 2329 2330 inode_item = btrfs_item_ptr(eb, i, 2331 struct btrfs_inode_item); 2332 ret = replay_xattr_deletes(wc->trans, root, log, 2333 path, key.objectid); 2334 if (ret) 2335 break; 2336 mode = btrfs_inode_mode(eb, inode_item); 2337 if (S_ISDIR(mode)) { 2338 ret = replay_dir_deletes(wc->trans, 2339 root, log, path, key.objectid, 0); 2340 if (ret) 2341 break; 2342 } 2343 ret = overwrite_item(wc->trans, root, path, 2344 eb, i, &key); 2345 if (ret) 2346 break; 2347 2348 /* for regular files, make sure corresponding 2349 * orphan item exist. extents past the new EOF 2350 * will be truncated later by orphan cleanup. 2351 */ 2352 if (S_ISREG(mode)) { 2353 ret = insert_orphan_item(wc->trans, root, 2354 key.objectid); 2355 if (ret) 2356 break; 2357 } 2358 2359 ret = link_to_fixup_dir(wc->trans, root, 2360 path, key.objectid); 2361 if (ret) 2362 break; 2363 } 2364 2365 if (key.type == BTRFS_DIR_INDEX_KEY && 2366 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2367 ret = replay_one_dir_item(wc->trans, root, path, 2368 eb, i, &key); 2369 if (ret) 2370 break; 2371 } 2372 2373 if (wc->stage < LOG_WALK_REPLAY_ALL) 2374 continue; 2375 2376 /* these keys are simply copied */ 2377 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2378 ret = overwrite_item(wc->trans, root, path, 2379 eb, i, &key); 2380 if (ret) 2381 break; 2382 } else if (key.type == BTRFS_INODE_REF_KEY || 2383 key.type == BTRFS_INODE_EXTREF_KEY) { 2384 ret = add_inode_ref(wc->trans, root, log, path, 2385 eb, i, &key); 2386 if (ret && ret != -ENOENT) 2387 break; 2388 ret = 0; 2389 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2390 ret = replay_one_extent(wc->trans, root, path, 2391 eb, i, &key); 2392 if (ret) 2393 break; 2394 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2395 ret = replay_one_dir_item(wc->trans, root, path, 2396 eb, i, &key); 2397 if (ret) 2398 break; 2399 } 2400 } 2401 btrfs_free_path(path); 2402 return ret; 2403 } 2404 2405 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2406 struct btrfs_root *root, 2407 struct btrfs_path *path, int *level, 2408 struct walk_control *wc) 2409 { 2410 u64 root_owner; 2411 u64 bytenr; 2412 u64 ptr_gen; 2413 struct extent_buffer *next; 2414 struct extent_buffer *cur; 2415 struct extent_buffer *parent; 2416 u32 blocksize; 2417 int ret = 0; 2418 2419 WARN_ON(*level < 0); 2420 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2421 2422 while (*level > 0) { 2423 WARN_ON(*level < 0); 2424 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2425 cur = path->nodes[*level]; 2426 2427 WARN_ON(btrfs_header_level(cur) != *level); 2428 2429 if (path->slots[*level] >= 2430 btrfs_header_nritems(cur)) 2431 break; 2432 2433 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 
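		/*
		 * Fetch the expected generation of the child block as well;
		 * btrfs_read_buffer() uses it to verify that the block we
		 * read back is the one this log tree really points at.
		 */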
2434 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2435 blocksize = root->nodesize; 2436 2437 parent = path->nodes[*level]; 2438 root_owner = btrfs_header_owner(parent); 2439 2440 next = btrfs_find_create_tree_block(root, bytenr); 2441 if (IS_ERR(next)) 2442 return PTR_ERR(next); 2443 2444 if (*level == 1) { 2445 ret = wc->process_func(root, next, wc, ptr_gen); 2446 if (ret) { 2447 free_extent_buffer(next); 2448 return ret; 2449 } 2450 2451 path->slots[*level]++; 2452 if (wc->free) { 2453 ret = btrfs_read_buffer(next, ptr_gen); 2454 if (ret) { 2455 free_extent_buffer(next); 2456 return ret; 2457 } 2458 2459 if (trans) { 2460 btrfs_tree_lock(next); 2461 btrfs_set_lock_blocking(next); 2462 clean_tree_block(trans, root->fs_info, 2463 next); 2464 btrfs_wait_tree_block_writeback(next); 2465 btrfs_tree_unlock(next); 2466 } 2467 2468 WARN_ON(root_owner != 2469 BTRFS_TREE_LOG_OBJECTID); 2470 ret = btrfs_free_and_pin_reserved_extent(root, 2471 bytenr, blocksize); 2472 if (ret) { 2473 free_extent_buffer(next); 2474 return ret; 2475 } 2476 } 2477 free_extent_buffer(next); 2478 continue; 2479 } 2480 ret = btrfs_read_buffer(next, ptr_gen); 2481 if (ret) { 2482 free_extent_buffer(next); 2483 return ret; 2484 } 2485 2486 WARN_ON(*level <= 0); 2487 if (path->nodes[*level-1]) 2488 free_extent_buffer(path->nodes[*level-1]); 2489 path->nodes[*level-1] = next; 2490 *level = btrfs_header_level(next); 2491 path->slots[*level] = 0; 2492 cond_resched(); 2493 } 2494 WARN_ON(*level < 0); 2495 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2496 2497 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2498 2499 cond_resched(); 2500 return 0; 2501 } 2502 2503 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2504 struct btrfs_root *root, 2505 struct btrfs_path *path, int *level, 2506 struct walk_control *wc) 2507 { 2508 u64 root_owner; 2509 int i; 2510 int slot; 2511 int ret; 2512 2513 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2514 slot = path->slots[i]; 2515 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2516 path->slots[i]++; 2517 *level = i; 2518 WARN_ON(*level == 0); 2519 return 0; 2520 } else { 2521 struct extent_buffer *parent; 2522 if (path->nodes[*level] == root->node) 2523 parent = path->nodes[*level]; 2524 else 2525 parent = path->nodes[*level + 1]; 2526 2527 root_owner = btrfs_header_owner(parent); 2528 ret = wc->process_func(root, path->nodes[*level], wc, 2529 btrfs_header_generation(path->nodes[*level])); 2530 if (ret) 2531 return ret; 2532 2533 if (wc->free) { 2534 struct extent_buffer *next; 2535 2536 next = path->nodes[*level]; 2537 2538 if (trans) { 2539 btrfs_tree_lock(next); 2540 btrfs_set_lock_blocking(next); 2541 clean_tree_block(trans, root->fs_info, 2542 next); 2543 btrfs_wait_tree_block_writeback(next); 2544 btrfs_tree_unlock(next); 2545 } 2546 2547 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2548 ret = btrfs_free_and_pin_reserved_extent(root, 2549 path->nodes[*level]->start, 2550 path->nodes[*level]->len); 2551 if (ret) 2552 return ret; 2553 } 2554 free_extent_buffer(path->nodes[*level]); 2555 path->nodes[*level] = NULL; 2556 *level = i + 1; 2557 } 2558 } 2559 return 1; 2560 } 2561 2562 /* 2563 * drop the reference count on the tree rooted at 'snap'. This traverses 2564 * the tree freeing any blocks that have a ref count of zero after being 2565 * decremented. 
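 *
 * The same walk drives both replay and cleanup: with wc->free set, every
 * block is freed (or pinned) after wc->process_func has seen it, while a
 * walk without wc->free is a read-only traversal.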
2566 */ 2567 static int walk_log_tree(struct btrfs_trans_handle *trans, 2568 struct btrfs_root *log, struct walk_control *wc) 2569 { 2570 int ret = 0; 2571 int wret; 2572 int level; 2573 struct btrfs_path *path; 2574 int orig_level; 2575 2576 path = btrfs_alloc_path(); 2577 if (!path) 2578 return -ENOMEM; 2579 2580 level = btrfs_header_level(log->node); 2581 orig_level = level; 2582 path->nodes[level] = log->node; 2583 extent_buffer_get(log->node); 2584 path->slots[level] = 0; 2585 2586 while (1) { 2587 wret = walk_down_log_tree(trans, log, path, &level, wc); 2588 if (wret > 0) 2589 break; 2590 if (wret < 0) { 2591 ret = wret; 2592 goto out; 2593 } 2594 2595 wret = walk_up_log_tree(trans, log, path, &level, wc); 2596 if (wret > 0) 2597 break; 2598 if (wret < 0) { 2599 ret = wret; 2600 goto out; 2601 } 2602 } 2603 2604 /* was the root node processed? if not, catch it here */ 2605 if (path->nodes[orig_level]) { 2606 ret = wc->process_func(log, path->nodes[orig_level], wc, 2607 btrfs_header_generation(path->nodes[orig_level])); 2608 if (ret) 2609 goto out; 2610 if (wc->free) { 2611 struct extent_buffer *next; 2612 2613 next = path->nodes[orig_level]; 2614 2615 if (trans) { 2616 btrfs_tree_lock(next); 2617 btrfs_set_lock_blocking(next); 2618 clean_tree_block(trans, log->fs_info, next); 2619 btrfs_wait_tree_block_writeback(next); 2620 btrfs_tree_unlock(next); 2621 } 2622 2623 WARN_ON(log->root_key.objectid != 2624 BTRFS_TREE_LOG_OBJECTID); 2625 ret = btrfs_free_and_pin_reserved_extent(log, next->start, 2626 next->len); 2627 if (ret) 2628 goto out; 2629 } 2630 } 2631 2632 out: 2633 btrfs_free_path(path); 2634 return ret; 2635 } 2636 2637 /* 2638 * helper function to update the item for a given subvolumes log root 2639 * in the tree of log roots 2640 */ 2641 static int update_log_root(struct btrfs_trans_handle *trans, 2642 struct btrfs_root *log) 2643 { 2644 int ret; 2645 2646 if (log->log_transid == 1) { 2647 /* insert root item on the first sync */ 2648 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree, 2649 &log->root_key, &log->root_item); 2650 } else { 2651 ret = btrfs_update_root(trans, log->fs_info->log_root_tree, 2652 &log->root_key, &log->root_item); 2653 } 2654 return ret; 2655 } 2656 2657 static void wait_log_commit(struct btrfs_root *root, int transid) 2658 { 2659 DEFINE_WAIT(wait); 2660 int index = transid % 2; 2661 2662 /* 2663 * we only allow two pending log transactions at a time, 2664 * so we know that if ours is more than 2 older than the 2665 * current transaction, we're done 2666 */ 2667 do { 2668 prepare_to_wait(&root->log_commit_wait[index], 2669 &wait, TASK_UNINTERRUPTIBLE); 2670 mutex_unlock(&root->log_mutex); 2671 2672 if (root->log_transid_committed < transid && 2673 atomic_read(&root->log_commit[index])) 2674 schedule(); 2675 2676 finish_wait(&root->log_commit_wait[index], &wait); 2677 mutex_lock(&root->log_mutex); 2678 } while (root->log_transid_committed < transid && 2679 atomic_read(&root->log_commit[index])); 2680 } 2681 2682 static void wait_for_writer(struct btrfs_root *root) 2683 { 2684 DEFINE_WAIT(wait); 2685 2686 while (atomic_read(&root->log_writers)) { 2687 prepare_to_wait(&root->log_writer_wait, 2688 &wait, TASK_UNINTERRUPTIBLE); 2689 mutex_unlock(&root->log_mutex); 2690 if (atomic_read(&root->log_writers)) 2691 schedule(); 2692 finish_wait(&root->log_writer_wait, &wait); 2693 mutex_lock(&root->log_mutex); 2694 } 2695 } 2696 2697 static inline void btrfs_remove_log_ctx(struct btrfs_root *root, 2698 struct btrfs_log_ctx *ctx) 2699 { 2700 if (!ctx) 
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}

/*
 * Invoked with the log mutex held, or by a caller that is sure no other
 * task can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;
	struct btrfs_log_ctx *safe;

	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
		list_del_init(&ctx->list);
		ctx->log_ret = error;
	}

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}

/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it. When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/*
		 * When we're on an ssd, just kick the log commit out; on
		 * spinning media wait a bit so more writers can batch in.
		 */
		if (!btrfs_test_opt(root->fs_info, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
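	 * The blk_plug lets the block layer batch and merge the submissions
	 * before they go down to the device.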
2800 */ 2801 blk_start_plug(&plug); 2802 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); 2803 if (ret) { 2804 blk_finish_plug(&plug); 2805 btrfs_abort_transaction(trans, ret); 2806 btrfs_free_logged_extents(log, log_transid); 2807 btrfs_set_log_full_commit(root->fs_info, trans); 2808 mutex_unlock(&root->log_mutex); 2809 goto out; 2810 } 2811 2812 btrfs_set_root_node(&log->root_item, log->node); 2813 2814 root->log_transid++; 2815 log->log_transid = root->log_transid; 2816 root->log_start_pid = 0; 2817 /* 2818 * IO has been started, blocks of the log tree have WRITTEN flag set 2819 * in their headers. new modifications of the log will be written to 2820 * new positions. so it's safe to allow log writers to go in. 2821 */ 2822 mutex_unlock(&root->log_mutex); 2823 2824 btrfs_init_log_ctx(&root_log_ctx, NULL); 2825 2826 mutex_lock(&log_root_tree->log_mutex); 2827 atomic_inc(&log_root_tree->log_batch); 2828 atomic_inc(&log_root_tree->log_writers); 2829 2830 index2 = log_root_tree->log_transid % 2; 2831 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 2832 root_log_ctx.log_transid = log_root_tree->log_transid; 2833 2834 mutex_unlock(&log_root_tree->log_mutex); 2835 2836 ret = update_log_root(trans, log); 2837 2838 mutex_lock(&log_root_tree->log_mutex); 2839 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 2840 /* 2841 * Implicit memory barrier after atomic_dec_and_test 2842 */ 2843 if (waitqueue_active(&log_root_tree->log_writer_wait)) 2844 wake_up(&log_root_tree->log_writer_wait); 2845 } 2846 2847 if (ret) { 2848 if (!list_empty(&root_log_ctx.list)) 2849 list_del_init(&root_log_ctx.list); 2850 2851 blk_finish_plug(&plug); 2852 btrfs_set_log_full_commit(root->fs_info, trans); 2853 2854 if (ret != -ENOSPC) { 2855 btrfs_abort_transaction(trans, ret); 2856 mutex_unlock(&log_root_tree->log_mutex); 2857 goto out; 2858 } 2859 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2860 btrfs_free_logged_extents(log, log_transid); 2861 mutex_unlock(&log_root_tree->log_mutex); 2862 ret = -EAGAIN; 2863 goto out; 2864 } 2865 2866 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 2867 blk_finish_plug(&plug); 2868 list_del_init(&root_log_ctx.list); 2869 mutex_unlock(&log_root_tree->log_mutex); 2870 ret = root_log_ctx.log_ret; 2871 goto out; 2872 } 2873 2874 index2 = root_log_ctx.log_transid % 2; 2875 if (atomic_read(&log_root_tree->log_commit[index2])) { 2876 blk_finish_plug(&plug); 2877 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, 2878 mark); 2879 btrfs_wait_logged_extents(trans, log, log_transid); 2880 wait_log_commit(log_root_tree, 2881 root_log_ctx.log_transid); 2882 mutex_unlock(&log_root_tree->log_mutex); 2883 if (!ret) 2884 ret = root_log_ctx.log_ret; 2885 goto out; 2886 } 2887 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 2888 atomic_set(&log_root_tree->log_commit[index2], 1); 2889 2890 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 2891 wait_log_commit(log_root_tree, 2892 root_log_ctx.log_transid - 1); 2893 } 2894 2895 wait_for_writer(log_root_tree); 2896 2897 /* 2898 * now that we've moved on to the tree of log tree roots, 2899 * check the full commit flag again 2900 */ 2901 if (btrfs_need_log_full_commit(root->fs_info, trans)) { 2902 blk_finish_plug(&plug); 2903 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2904 btrfs_free_logged_extents(log, log_transid); 2905 mutex_unlock(&log_root_tree->log_mutex); 2906 ret = -EAGAIN; 2907 goto out_wake_log_root; 2908 } 
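	/*
	 * Now write out the blocks of the tree of log roots itself; these,
	 * like the subvolume log blocks submitted above, must be fully on
	 * disk before the super block may point at this log.
	 */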
	ret = btrfs_write_marked_extents(log_root_tree,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	if (!ret)
		ret = btrfs_wait_marked_extents(log_root_tree,
						&log_root_tree->dirty_log_pages,
						EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_logged_extents(trans, log, log_transid);

	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us. We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	mutex_lock(&log_root_tree->log_mutex);
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * The barrier before waitqueue_active is implied by mutex_unlock
	 */
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	mutex_lock(&root->log_mutex);
	btrfs_remove_all_log_ctxs(root, index1, ret);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/*
	 * The barrier before waitqueue_active is implied by mutex_unlock
	 */
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}

static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	/* I don't think this can happen but just in case */
	if (ret)
		btrfs_abort_transaction(trans, ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW);
	}

	/*
	 * We may have short-circuited the log tree with the full commit
	 * logic and left ordered extents on our list, so clear these out
	 * to keep us from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}

/*
 * free all the extents used by the tree log. This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}

/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist. But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked. Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
3146 } 3147 if (ret == 0) { 3148 struct btrfs_inode_item *item; 3149 u64 i_size; 3150 3151 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3152 struct btrfs_inode_item); 3153 i_size = btrfs_inode_size(path->nodes[0], item); 3154 if (i_size > bytes_del) 3155 i_size -= bytes_del; 3156 else 3157 i_size = 0; 3158 btrfs_set_inode_size(path->nodes[0], item, i_size); 3159 btrfs_mark_buffer_dirty(path->nodes[0]); 3160 } else 3161 ret = 0; 3162 btrfs_release_path(path); 3163 } 3164 fail: 3165 btrfs_free_path(path); 3166 out_unlock: 3167 mutex_unlock(&BTRFS_I(dir)->log_mutex); 3168 if (ret == -ENOSPC) { 3169 btrfs_set_log_full_commit(root->fs_info, trans); 3170 ret = 0; 3171 } else if (ret < 0) 3172 btrfs_abort_transaction(trans, ret); 3173 3174 btrfs_end_log_trans(root); 3175 3176 return err; 3177 } 3178 3179 /* see comments for btrfs_del_dir_entries_in_log */ 3180 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3181 struct btrfs_root *root, 3182 const char *name, int name_len, 3183 struct inode *inode, u64 dirid) 3184 { 3185 struct btrfs_root *log; 3186 u64 index; 3187 int ret; 3188 3189 if (BTRFS_I(inode)->logged_trans < trans->transid) 3190 return 0; 3191 3192 ret = join_running_log_trans(root); 3193 if (ret) 3194 return 0; 3195 log = root->log_root; 3196 mutex_lock(&BTRFS_I(inode)->log_mutex); 3197 3198 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 3199 dirid, &index); 3200 mutex_unlock(&BTRFS_I(inode)->log_mutex); 3201 if (ret == -ENOSPC) { 3202 btrfs_set_log_full_commit(root->fs_info, trans); 3203 ret = 0; 3204 } else if (ret < 0 && ret != -ENOENT) 3205 btrfs_abort_transaction(trans, ret); 3206 btrfs_end_log_trans(root); 3207 3208 return ret; 3209 } 3210 3211 /* 3212 * creates a range item in the log for 'dirid'. first_offset and 3213 * last_offset tell us which parts of the key space the log should 3214 * be considered authoritative for. 3215 */ 3216 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3217 struct btrfs_root *log, 3218 struct btrfs_path *path, 3219 int key_type, u64 dirid, 3220 u64 first_offset, u64 last_offset) 3221 { 3222 int ret; 3223 struct btrfs_key key; 3224 struct btrfs_dir_log_item *item; 3225 3226 key.objectid = dirid; 3227 key.offset = first_offset; 3228 if (key_type == BTRFS_DIR_ITEM_KEY) 3229 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3230 else 3231 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3232 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3233 if (ret) 3234 return ret; 3235 3236 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3237 struct btrfs_dir_log_item); 3238 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3239 btrfs_mark_buffer_dirty(path->nodes[0]); 3240 btrfs_release_path(path); 3241 return 0; 3242 } 3243 3244 /* 3245 * log all the items included in the current transaction for a given 3246 * directory. 
This also creates the range items in the log tree required 3247 * to replay anything deleted before the fsync 3248 */ 3249 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3250 struct btrfs_root *root, struct inode *inode, 3251 struct btrfs_path *path, 3252 struct btrfs_path *dst_path, int key_type, 3253 struct btrfs_log_ctx *ctx, 3254 u64 min_offset, u64 *last_offset_ret) 3255 { 3256 struct btrfs_key min_key; 3257 struct btrfs_root *log = root->log_root; 3258 struct extent_buffer *src; 3259 int err = 0; 3260 int ret; 3261 int i; 3262 int nritems; 3263 u64 first_offset = min_offset; 3264 u64 last_offset = (u64)-1; 3265 u64 ino = btrfs_ino(inode); 3266 3267 log = root->log_root; 3268 3269 min_key.objectid = ino; 3270 min_key.type = key_type; 3271 min_key.offset = min_offset; 3272 3273 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3274 3275 /* 3276 * we didn't find anything from this transaction, see if there 3277 * is anything at all 3278 */ 3279 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3280 min_key.objectid = ino; 3281 min_key.type = key_type; 3282 min_key.offset = (u64)-1; 3283 btrfs_release_path(path); 3284 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3285 if (ret < 0) { 3286 btrfs_release_path(path); 3287 return ret; 3288 } 3289 ret = btrfs_previous_item(root, path, ino, key_type); 3290 3291 /* if ret == 0 there are items for this type, 3292 * create a range to tell us the last key of this type. 3293 * otherwise, there are no items in this directory after 3294 * *min_offset, and we create a range to indicate that. 3295 */ 3296 if (ret == 0) { 3297 struct btrfs_key tmp; 3298 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3299 path->slots[0]); 3300 if (key_type == tmp.type) 3301 first_offset = max(min_offset, tmp.offset) + 1; 3302 } 3303 goto done; 3304 } 3305 3306 /* go backward to find any previous key */ 3307 ret = btrfs_previous_item(root, path, ino, key_type); 3308 if (ret == 0) { 3309 struct btrfs_key tmp; 3310 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3311 if (key_type == tmp.type) { 3312 first_offset = tmp.offset; 3313 ret = overwrite_item(trans, log, dst_path, 3314 path->nodes[0], path->slots[0], 3315 &tmp); 3316 if (ret) { 3317 err = ret; 3318 goto done; 3319 } 3320 } 3321 } 3322 btrfs_release_path(path); 3323 3324 /* find the first key from this transaction again */ 3325 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3326 if (WARN_ON(ret != 0)) 3327 goto done; 3328 3329 /* 3330 * we have a block from this transaction, log every item in it 3331 * from our directory 3332 */ 3333 while (1) { 3334 struct btrfs_key tmp; 3335 src = path->nodes[0]; 3336 nritems = btrfs_header_nritems(src); 3337 for (i = path->slots[0]; i < nritems; i++) { 3338 struct btrfs_dir_item *di; 3339 3340 btrfs_item_key_to_cpu(src, &min_key, i); 3341 3342 if (min_key.objectid != ino || min_key.type != key_type) 3343 goto done; 3344 ret = overwrite_item(trans, log, dst_path, src, i, 3345 &min_key); 3346 if (ret) { 3347 err = ret; 3348 goto done; 3349 } 3350 3351 /* 3352 * We must make sure that when we log a directory entry, 3353 * the corresponding inode, after log replay, has a 3354 * matching link count. 
For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in an fsync log that, when replayed,
			 * leaves our file inode with a link count of 1 even
			 * though two directory entries point to the same
			 * inode. After removing one of the names, it would
			 * not be possible to remove the other name, which
			 * always resulted in stale file handle errors, and
			 * it would not be possible to rmdir the parent
			 * directory, since its i_size could never decrement
			 * to the value BTRFS_EMPTY_DIR_SIZE, resulting in
			 * -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}

/*
 * logging directories is very similar to logging inodes. We find all the
 * items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest key logged by this transaction and one key larger than the
 * largest key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path,
			  struct btrfs_log_ctx *ctx)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, ctx, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}

/*
 * a helper function to drop items from the log before we relog an
 * inode.
max_key_type indicates the highest item type to remove. 3474 * This cannot be run for file data extents because it does not 3475 * free the extents they point to. 3476 */ 3477 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3478 struct btrfs_root *log, 3479 struct btrfs_path *path, 3480 u64 objectid, int max_key_type) 3481 { 3482 int ret; 3483 struct btrfs_key key; 3484 struct btrfs_key found_key; 3485 int start_slot; 3486 3487 key.objectid = objectid; 3488 key.type = max_key_type; 3489 key.offset = (u64)-1; 3490 3491 while (1) { 3492 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3493 BUG_ON(ret == 0); /* Logic error */ 3494 if (ret < 0) 3495 break; 3496 3497 if (path->slots[0] == 0) 3498 break; 3499 3500 path->slots[0]--; 3501 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3502 path->slots[0]); 3503 3504 if (found_key.objectid != objectid) 3505 break; 3506 3507 found_key.offset = 0; 3508 found_key.type = 0; 3509 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3510 &start_slot); 3511 3512 ret = btrfs_del_items(trans, log, path, start_slot, 3513 path->slots[0] - start_slot + 1); 3514 /* 3515 * If start slot isn't 0 then we don't need to re-search, we've 3516 * found the last guy with the objectid in this tree. 3517 */ 3518 if (ret || start_slot != 0) 3519 break; 3520 btrfs_release_path(path); 3521 } 3522 btrfs_release_path(path); 3523 if (ret > 0) 3524 ret = 0; 3525 return ret; 3526 } 3527 3528 static void fill_inode_item(struct btrfs_trans_handle *trans, 3529 struct extent_buffer *leaf, 3530 struct btrfs_inode_item *item, 3531 struct inode *inode, int log_inode_only, 3532 u64 logged_isize) 3533 { 3534 struct btrfs_map_token token; 3535 3536 btrfs_init_map_token(&token); 3537 3538 if (log_inode_only) { 3539 /* set the generation to zero so the recover code 3540 * can tell the difference between an logging 3541 * just to say 'this inode exists' and a logging 3542 * to say 'update this inode with these values' 3543 */ 3544 btrfs_set_token_inode_generation(leaf, item, 0, &token); 3545 btrfs_set_token_inode_size(leaf, item, logged_isize, &token); 3546 } else { 3547 btrfs_set_token_inode_generation(leaf, item, 3548 BTRFS_I(inode)->generation, 3549 &token); 3550 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3551 } 3552 3553 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3554 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3555 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3556 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3557 3558 btrfs_set_token_timespec_sec(leaf, &item->atime, 3559 inode->i_atime.tv_sec, &token); 3560 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3561 inode->i_atime.tv_nsec, &token); 3562 3563 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3564 inode->i_mtime.tv_sec, &token); 3565 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3566 inode->i_mtime.tv_nsec, &token); 3567 3568 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3569 inode->i_ctime.tv_sec, &token); 3570 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3571 inode->i_ctime.tv_nsec, &token); 3572 3573 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3574 &token); 3575 3576 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); 3577 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3578 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3579 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3580 
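	/*
	 * The inode's block group is not tracked in the log; it is always
	 * written out as zero.
	 */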
btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3581 } 3582 3583 static int log_inode_item(struct btrfs_trans_handle *trans, 3584 struct btrfs_root *log, struct btrfs_path *path, 3585 struct inode *inode) 3586 { 3587 struct btrfs_inode_item *inode_item; 3588 int ret; 3589 3590 ret = btrfs_insert_empty_item(trans, log, path, 3591 &BTRFS_I(inode)->location, 3592 sizeof(*inode_item)); 3593 if (ret && ret != -EEXIST) 3594 return ret; 3595 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3596 struct btrfs_inode_item); 3597 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0); 3598 btrfs_release_path(path); 3599 return 0; 3600 } 3601 3602 static noinline int copy_items(struct btrfs_trans_handle *trans, 3603 struct inode *inode, 3604 struct btrfs_path *dst_path, 3605 struct btrfs_path *src_path, u64 *last_extent, 3606 int start_slot, int nr, int inode_only, 3607 u64 logged_isize) 3608 { 3609 unsigned long src_offset; 3610 unsigned long dst_offset; 3611 struct btrfs_root *log = BTRFS_I(inode)->root->log_root; 3612 struct btrfs_file_extent_item *extent; 3613 struct btrfs_inode_item *inode_item; 3614 struct extent_buffer *src = src_path->nodes[0]; 3615 struct btrfs_key first_key, last_key, key; 3616 int ret; 3617 struct btrfs_key *ins_keys; 3618 u32 *ins_sizes; 3619 char *ins_data; 3620 int i; 3621 struct list_head ordered_sums; 3622 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3623 bool has_extents = false; 3624 bool need_find_last_extent = true; 3625 bool done = false; 3626 3627 INIT_LIST_HEAD(&ordered_sums); 3628 3629 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3630 nr * sizeof(u32), GFP_NOFS); 3631 if (!ins_data) 3632 return -ENOMEM; 3633 3634 first_key.objectid = (u64)-1; 3635 3636 ins_sizes = (u32 *)ins_data; 3637 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3638 3639 for (i = 0; i < nr; i++) { 3640 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3641 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3642 } 3643 ret = btrfs_insert_empty_items(trans, log, dst_path, 3644 ins_keys, ins_sizes, nr); 3645 if (ret) { 3646 kfree(ins_data); 3647 return ret; 3648 } 3649 3650 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3651 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3652 dst_path->slots[0]); 3653 3654 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3655 3656 if ((i == (nr - 1))) 3657 last_key = ins_keys[i]; 3658 3659 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3660 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3661 dst_path->slots[0], 3662 struct btrfs_inode_item); 3663 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3664 inode, inode_only == LOG_INODE_EXISTS, 3665 logged_isize); 3666 } else { 3667 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3668 src_offset, ins_sizes[i]); 3669 } 3670 3671 /* 3672 * We set need_find_last_extent here in case we know we were 3673 * processing other items and then walk into the first extent in 3674 * the inode. If we don't hit an extent then nothing changes, 3675 * we'll do the last search the next time around. 
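		 * (need_find_last_extent starts out true and is only cleared
		 * when we copy a non-extent item, so a batch made up purely
		 * of file extent items leaves it set and forces the
		 * back-reference search further below.)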
3676 */ 3677 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 3678 has_extents = true; 3679 if (first_key.objectid == (u64)-1) 3680 first_key = ins_keys[i]; 3681 } else { 3682 need_find_last_extent = false; 3683 } 3684 3685 /* take a reference on file data extents so that truncates 3686 * or deletes of this inode don't have to relog the inode 3687 * again 3688 */ 3689 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && 3690 !skip_csum) { 3691 int found_type; 3692 extent = btrfs_item_ptr(src, start_slot + i, 3693 struct btrfs_file_extent_item); 3694 3695 if (btrfs_file_extent_generation(src, extent) < trans->transid) 3696 continue; 3697 3698 found_type = btrfs_file_extent_type(src, extent); 3699 if (found_type == BTRFS_FILE_EXTENT_REG) { 3700 u64 ds, dl, cs, cl; 3701 ds = btrfs_file_extent_disk_bytenr(src, 3702 extent); 3703 /* ds == 0 is a hole */ 3704 if (ds == 0) 3705 continue; 3706 3707 dl = btrfs_file_extent_disk_num_bytes(src, 3708 extent); 3709 cs = btrfs_file_extent_offset(src, extent); 3710 cl = btrfs_file_extent_num_bytes(src, 3711 extent); 3712 if (btrfs_file_extent_compression(src, 3713 extent)) { 3714 cs = 0; 3715 cl = dl; 3716 } 3717 3718 ret = btrfs_lookup_csums_range( 3719 log->fs_info->csum_root, 3720 ds + cs, ds + cs + cl - 1, 3721 &ordered_sums, 0); 3722 if (ret) { 3723 btrfs_release_path(dst_path); 3724 kfree(ins_data); 3725 return ret; 3726 } 3727 } 3728 } 3729 } 3730 3731 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 3732 btrfs_release_path(dst_path); 3733 kfree(ins_data); 3734 3735 /* 3736 * we have to do this after the loop above to avoid changing the 3737 * log tree while trying to change the log tree. 3738 */ 3739 ret = 0; 3740 while (!list_empty(&ordered_sums)) { 3741 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 3742 struct btrfs_ordered_sum, 3743 list); 3744 if (!ret) 3745 ret = btrfs_csum_file_blocks(trans, log, sums); 3746 list_del(&sums->list); 3747 kfree(sums); 3748 } 3749 3750 if (!has_extents) 3751 return ret; 3752 3753 if (need_find_last_extent && *last_extent == first_key.offset) { 3754 /* 3755 * We don't have any leafs between our current one and the one 3756 * we processed before that can have file extent items for our 3757 * inode (and have a generation number smaller than our current 3758 * transaction id). 3759 */ 3760 need_find_last_extent = false; 3761 } 3762 3763 /* 3764 * Because we use btrfs_search_forward we could skip leaves that were 3765 * not modified and then assume *last_extent is valid when it really 3766 * isn't. So back up to the previous leaf and read the end of the last 3767 * extent before we go and fill in holes. 
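	 *
	 * For example, if the previous leaf ends with an extent item from an
	 * older transaction covering [0, 64K), *last_extent must become 64K
	 * here; leaving it smaller would make the loop below punch a hole
	 * over data that is still live.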
3768 */ 3769 if (need_find_last_extent) { 3770 u64 len; 3771 3772 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path); 3773 if (ret < 0) 3774 return ret; 3775 if (ret) 3776 goto fill_holes; 3777 if (src_path->slots[0]) 3778 src_path->slots[0]--; 3779 src = src_path->nodes[0]; 3780 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 3781 if (key.objectid != btrfs_ino(inode) || 3782 key.type != BTRFS_EXTENT_DATA_KEY) 3783 goto fill_holes; 3784 extent = btrfs_item_ptr(src, src_path->slots[0], 3785 struct btrfs_file_extent_item); 3786 if (btrfs_file_extent_type(src, extent) == 3787 BTRFS_FILE_EXTENT_INLINE) { 3788 len = btrfs_file_extent_inline_len(src, 3789 src_path->slots[0], 3790 extent); 3791 *last_extent = ALIGN(key.offset + len, 3792 log->sectorsize); 3793 } else { 3794 len = btrfs_file_extent_num_bytes(src, extent); 3795 *last_extent = key.offset + len; 3796 } 3797 } 3798 fill_holes: 3799 /* So we did prev_leaf, now we need to move to the next leaf, but a few 3800 * things could have happened 3801 * 3802 * 1) A merge could have happened, so we could currently be on a leaf 3803 * that holds what we were copying in the first place. 3804 * 2) A split could have happened, and now not all of the items we want 3805 * are on the same leaf. 3806 * 3807 * So we need to adjust how we search for holes, we need to drop the 3808 * path and re-search for the first extent key we found, and then walk 3809 * forward until we hit the last one we copied. 3810 */ 3811 if (need_find_last_extent) { 3812 /* btrfs_prev_leaf could return 1 without releasing the path */ 3813 btrfs_release_path(src_path); 3814 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key, 3815 src_path, 0, 0); 3816 if (ret < 0) 3817 return ret; 3818 ASSERT(ret == 0); 3819 src = src_path->nodes[0]; 3820 i = src_path->slots[0]; 3821 } else { 3822 i = start_slot; 3823 } 3824 3825 /* 3826 * Ok so here we need to go through and fill in any holes we may have 3827 * to make sure that holes are punched for those areas in case they had 3828 * extents previously. 3829 */ 3830 while (!done) { 3831 u64 offset, len; 3832 u64 extent_end; 3833 3834 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 3835 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path); 3836 if (ret < 0) 3837 return ret; 3838 ASSERT(ret == 0); 3839 src = src_path->nodes[0]; 3840 i = 0; 3841 } 3842 3843 btrfs_item_key_to_cpu(src, &key, i); 3844 if (!btrfs_comp_cpu_keys(&key, &last_key)) 3845 done = true; 3846 if (key.objectid != btrfs_ino(inode) || 3847 key.type != BTRFS_EXTENT_DATA_KEY) { 3848 i++; 3849 continue; 3850 } 3851 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 3852 if (btrfs_file_extent_type(src, extent) == 3853 BTRFS_FILE_EXTENT_INLINE) { 3854 len = btrfs_file_extent_inline_len(src, i, extent); 3855 extent_end = ALIGN(key.offset + len, log->sectorsize); 3856 } else { 3857 len = btrfs_file_extent_num_bytes(src, extent); 3858 extent_end = key.offset + len; 3859 } 3860 i++; 3861 3862 if (*last_extent == key.offset) { 3863 *last_extent = extent_end; 3864 continue; 3865 } 3866 offset = *last_extent; 3867 len = key.offset - *last_extent; 3868 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 3869 offset, 0, 0, len, 0, len, 0, 3870 0, 0); 3871 if (ret) 3872 break; 3873 *last_extent = extent_end; 3874 } 3875 /* 3876 * Need to let the callers know we dropped the path so they should 3877 * re-search. 
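	 * (A return value of 1 from copy_items therefore means "success,
	 * but the path was released", not an error.)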
 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}

static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}

static int wait_ordered_extents(struct btrfs_trans_handle *trans,
				struct inode *inode,
				struct btrfs_root *root,
				const struct extent_map *em,
				const struct list_head *logged_list,
				bool *ordered_io_error)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *log = root->log_root;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	*ordered_io_error = false;

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/*
	 * Wait for any ordered extent that covers our extent map. If it
	 * finishes without an error, first check and see if our csums are on
	 * our outstanding ordered extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		if (!mod_len)
			break;

		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			const u64 start = ordered->file_offset;
			const u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(ordered->inode != inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}

		wait_event(ordered->wait,
			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));

		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
			/*
			 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
			 * i_mapping flags, so that the next fsync won't get
			 * an outdated io error too.
			 */
			filemap_check_errors(inode->i_mapping);
			*ordered_io_error = true;
			break;
		}
		/*
		 * We are going to copy all the csums on this ordered extent,
		 * so go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
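			 *
			 * The else branch below handles the mirror case,
			 * where the ordered extent starts at or before the
			 * logged extent:
			 *
			 *      |--------- logged extent ---------|
			 * |----- ordered extent ----|
			 *
			 * and mod_start/mod_len are advanced past the prefix
			 * the ordered extent already covers (or mod_len is
			 * zeroed if it covers all of it).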
3978 */ 3979 } else { 3980 if (ordered->file_offset + ordered->len < 3981 mod_start + mod_len) { 3982 mod_len = (mod_start + mod_len) - 3983 (ordered->file_offset + ordered->len); 3984 mod_start = ordered->file_offset + 3985 ordered->len; 3986 } else { 3987 mod_len = 0; 3988 } 3989 } 3990 3991 if (skip_csum) 3992 continue; 3993 3994 /* 3995 * To keep us from looping for the above case of an ordered 3996 * extent that falls inside of the logged extent. 3997 */ 3998 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, 3999 &ordered->flags)) 4000 continue; 4001 4002 list_for_each_entry(sum, &ordered->list, list) { 4003 ret = btrfs_csum_file_blocks(trans, log, sum); 4004 if (ret) 4005 break; 4006 } 4007 } 4008 4009 if (*ordered_io_error || !mod_len || ret || skip_csum) 4010 return ret; 4011 4012 if (em->compress_type) { 4013 csum_offset = 0; 4014 csum_len = max(em->block_len, em->orig_block_len); 4015 } else { 4016 csum_offset = mod_start - em->start; 4017 csum_len = mod_len; 4018 } 4019 4020 /* block start is already adjusted for the file extent offset. */ 4021 ret = btrfs_lookup_csums_range(log->fs_info->csum_root, 4022 em->block_start + csum_offset, 4023 em->block_start + csum_offset + 4024 csum_len - 1, &ordered_sums, 0); 4025 if (ret) 4026 return ret; 4027 4028 while (!list_empty(&ordered_sums)) { 4029 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4030 struct btrfs_ordered_sum, 4031 list); 4032 if (!ret) 4033 ret = btrfs_csum_file_blocks(trans, log, sums); 4034 list_del(&sums->list); 4035 kfree(sums); 4036 } 4037 4038 return ret; 4039 } 4040 4041 static int log_one_extent(struct btrfs_trans_handle *trans, 4042 struct inode *inode, struct btrfs_root *root, 4043 const struct extent_map *em, 4044 struct btrfs_path *path, 4045 const struct list_head *logged_list, 4046 struct btrfs_log_ctx *ctx) 4047 { 4048 struct btrfs_root *log = root->log_root; 4049 struct btrfs_file_extent_item *fi; 4050 struct extent_buffer *leaf; 4051 struct btrfs_map_token token; 4052 struct btrfs_key key; 4053 u64 extent_offset = em->start - em->orig_start; 4054 u64 block_len; 4055 int ret; 4056 int extent_inserted = 0; 4057 bool ordered_io_err = false; 4058 4059 ret = wait_ordered_extents(trans, inode, root, em, logged_list, 4060 &ordered_io_err); 4061 if (ret) 4062 return ret; 4063 4064 if (ordered_io_err) { 4065 ctx->io_err = -EIO; 4066 return 0; 4067 } 4068 4069 btrfs_init_map_token(&token); 4070 4071 ret = __btrfs_drop_extents(trans, log, inode, path, em->start, 4072 em->start + em->len, NULL, 0, 1, 4073 sizeof(*fi), &extent_inserted); 4074 if (ret) 4075 return ret; 4076 4077 if (!extent_inserted) { 4078 key.objectid = btrfs_ino(inode); 4079 key.type = BTRFS_EXTENT_DATA_KEY; 4080 key.offset = em->start; 4081 4082 ret = btrfs_insert_empty_item(trans, log, path, &key, 4083 sizeof(*fi)); 4084 if (ret) 4085 return ret; 4086 } 4087 leaf = path->nodes[0]; 4088 fi = btrfs_item_ptr(leaf, path->slots[0], 4089 struct btrfs_file_extent_item); 4090 4091 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4092 &token); 4093 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4094 btrfs_set_token_file_extent_type(leaf, fi, 4095 BTRFS_FILE_EXTENT_PREALLOC, 4096 &token); 4097 else 4098 btrfs_set_token_file_extent_type(leaf, fi, 4099 BTRFS_FILE_EXTENT_REG, 4100 &token); 4101 4102 block_len = max(em->block_len, em->orig_block_len); 4103 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4104 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4105 em->block_start, 4106 &token); 4107 
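		/*
		 * For compressed extents the logged disk_bytenr is the start
		 * of the on-disk extent (no extent_offset adjustment) and
		 * disk_num_bytes, set below, covers its whole on-disk length.
		 */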
btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4108 &token); 4109 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4110 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4111 em->block_start - 4112 extent_offset, &token); 4113 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4114 &token); 4115 } else { 4116 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4117 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4118 &token); 4119 } 4120 4121 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4122 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4123 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4124 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4125 &token); 4126 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4127 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4128 btrfs_mark_buffer_dirty(leaf); 4129 4130 btrfs_release_path(path); 4131 4132 return ret; 4133 } 4134 4135 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4136 struct btrfs_root *root, 4137 struct inode *inode, 4138 struct btrfs_path *path, 4139 struct list_head *logged_list, 4140 struct btrfs_log_ctx *ctx, 4141 const u64 start, 4142 const u64 end) 4143 { 4144 struct extent_map *em, *n; 4145 struct list_head extents; 4146 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 4147 u64 test_gen; 4148 int ret = 0; 4149 int num = 0; 4150 4151 INIT_LIST_HEAD(&extents); 4152 4153 down_write(&BTRFS_I(inode)->dio_sem); 4154 write_lock(&tree->lock); 4155 test_gen = root->fs_info->last_trans_committed; 4156 4157 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4158 list_del_init(&em->list); 4159 4160 /* 4161 * Just an arbitrary cutoff: walking the modified extents gets 4162 * really CPU intensive once we have collected a lot of them, 4163 * and past a certain point a full transaction commit is simply 4164 * faster. 4165 */ 4166 if (++num > 32768) { 4167 list_del_init(&tree->modified_extents); 4168 ret = -EFBIG; 4169 goto process; 4170 } 4171 4172 if (em->generation <= test_gen) 4173 continue; 4174 /* Need a ref to keep it from getting evicted from cache */ 4175 atomic_inc(&em->refs); 4176 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4177 list_add_tail(&em->list, &extents); 4178 num++; 4179 } 4180 4181 list_sort(NULL, &extents, extent_cmp); 4182 btrfs_get_logged_extents(inode, logged_list, start, end); 4183 /* 4184 * Some ordered extents started by fsync might have completed 4185 * before we could collect them into the list logged_list, which 4186 * means they're gone, not in our logged_list nor in the inode's 4187 * ordered tree. We want the application/user space to know an 4188 * error happened while attempting to persist file data so that 4189 * it can take proper action. If such an error happened, we leave 4190 * without writing to the log tree and the fsync must report the 4191 * file data write error and not commit the current transaction. 4192 */ 4193 ret = filemap_check_errors(inode->i_mapping); 4194 if (ret) 4195 ctx->io_err = ret; 4196 process: 4197 while (!list_empty(&extents)) { 4198 em = list_entry(extents.next, struct extent_map, list); 4199 4200 list_del_init(&em->list); 4201 4202 /* 4203 * If we had an error we just need to delete everybody from our 4204 * private list.
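 * Every extent map still on the list carries the extra reference and
 * the EXTENT_FLAG_LOGGING bit taken when it was collected, so each one
 * must go through clear_em_logging() and free_extent_map() even on
 * error.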
4205 */ 4206 if (ret) { 4207 clear_em_logging(tree, em); 4208 free_extent_map(em); 4209 continue; 4210 } 4211 4212 write_unlock(&tree->lock); 4213 4214 ret = log_one_extent(trans, inode, root, em, path, logged_list, 4215 ctx); 4216 write_lock(&tree->lock); 4217 clear_em_logging(tree, em); 4218 free_extent_map(em); 4219 } 4220 WARN_ON(!list_empty(&extents)); 4221 write_unlock(&tree->lock); 4222 up_write(&BTRFS_I(inode)->dio_sem); 4223 4224 btrfs_release_path(path); 4225 return ret; 4226 } 4227 4228 static int logged_inode_size(struct btrfs_root *log, struct inode *inode, 4229 struct btrfs_path *path, u64 *size_ret) 4230 { 4231 struct btrfs_key key; 4232 int ret; 4233 4234 key.objectid = btrfs_ino(inode); 4235 key.type = BTRFS_INODE_ITEM_KEY; 4236 key.offset = 0; 4237 4238 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); 4239 if (ret < 0) { 4240 return ret; 4241 } else if (ret > 0) { 4242 *size_ret = 0; 4243 } else { 4244 struct btrfs_inode_item *item; 4245 4246 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4247 struct btrfs_inode_item); 4248 *size_ret = btrfs_inode_size(path->nodes[0], item); 4249 } 4250 4251 btrfs_release_path(path); 4252 return 0; 4253 } 4254 4255 /* 4256 * At the moment we always log all xattrs. This is to figure out at log replay 4257 * time which xattrs must have their deletion replayed. If an xattr is missing 4258 * in the log tree and exists in the fs/subvol tree, we delete it. This is 4259 * because if an xattr is deleted, the inode is fsynced, and then a power 4260 * failure happens, the log is replayed the next time the fs is mounted and 4261 * we want the xattr to no longer exist (the same behaviour as other 4262 * filesystems with a journal, such as ext3/4, xfs, f2fs, etc). 4263 */ 4264 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 4265 struct btrfs_root *root, 4266 struct inode *inode, 4267 struct btrfs_path *path, 4268 struct btrfs_path *dst_path) 4269 { 4270 int ret; 4271 struct btrfs_key key; 4272 const u64 ino = btrfs_ino(inode); 4273 int ins_nr = 0; 4274 int start_slot = 0; 4275 4276 key.objectid = ino; 4277 key.type = BTRFS_XATTR_ITEM_KEY; 4278 key.offset = 0; 4279 4280 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4281 if (ret < 0) 4282 return ret; 4283 4284 while (true) { 4285 int slot = path->slots[0]; 4286 struct extent_buffer *leaf = path->nodes[0]; 4287 int nritems = btrfs_header_nritems(leaf); 4288 4289 if (slot >= nritems) { 4290 if (ins_nr > 0) { 4291 u64 last_extent = 0; 4292 4293 ret = copy_items(trans, inode, dst_path, path, 4294 &last_extent, start_slot, 4295 ins_nr, 1, 0); 4296 /* can't be 1, extent items aren't processed */ 4297 ASSERT(ret <= 0); 4298 if (ret < 0) 4299 return ret; 4300 ins_nr = 0; 4301 } 4302 ret = btrfs_next_leaf(root, path); 4303 if (ret < 0) 4304 return ret; 4305 else if (ret > 0) 4306 break; 4307 continue; 4308 } 4309 4310 btrfs_item_key_to_cpu(leaf, &key, slot); 4311 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 4312 break; 4313 4314 if (ins_nr == 0) 4315 start_slot = slot; 4316 ins_nr++; 4317 path->slots[0]++; 4318 cond_resched(); 4319 } 4320 if (ins_nr > 0) { 4321 u64 last_extent = 0; 4322 4323 ret = copy_items(trans, inode, dst_path, path, 4324 &last_extent, start_slot, 4325 ins_nr, 1, 0); 4326 /* can't be 1, extent items aren't processed */ 4327 ASSERT(ret <= 0); 4328 if (ret < 0) 4329 return ret; 4330 } 4331 4332 return 0; 4333 } 4334 4335 /* 4336 * If the no holes feature is enabled we need to make sure any hole between the 4337 * last extent and the i_size of our
inode is explicitly marked in the log. This 4338 * is to make sure that doing something like: 4339 * 4340 * 1) create file with 128Kb of data 4341 * 2) truncate file to 64Kb 4342 * 3) truncate file to 256Kb 4343 * 4) fsync file 4344 * 5) <crash/power failure> 4345 * 6) mount fs and trigger log replay 4346 * 4347 * will give us a file with a size of 256Kb, where the first 64Kb of data match 4348 * what the file had in its first 64Kb at step 1 and the last 192Kb of the 4349 * file correspond to a hole. The presence of explicit holes in a log tree is 4350 * what guarantees that log replay will remove/adjust file extent items in the 4351 * fs/subvol tree. 4352 * 4353 * Here we do not need to care about holes between extents, as that is already 4354 * done by copy_items(). We also only need to do this in the full sync path, 4355 * where we look up extents from the fs/subvol tree only. In the fast path 4356 * case, we walk the list of modified extent maps and if any represents a hole, 4357 * we insert a corresponding extent representing a hole in the log tree. 4358 */ 4359 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, 4360 struct btrfs_root *root, 4361 struct inode *inode, 4362 struct btrfs_path *path) 4363 { 4364 int ret; 4365 struct btrfs_key key; 4366 u64 hole_start; 4367 u64 hole_size; 4368 struct extent_buffer *leaf; 4369 struct btrfs_root *log = root->log_root; 4370 const u64 ino = btrfs_ino(inode); 4371 const u64 i_size = i_size_read(inode); 4372 4373 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES)) 4374 return 0; 4375 4376 key.objectid = ino; 4377 key.type = BTRFS_EXTENT_DATA_KEY; 4378 key.offset = (u64)-1; 4379 4380 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4381 ASSERT(ret != 0); 4382 if (ret < 0) 4383 return ret; 4384 4385 ASSERT(path->slots[0] > 0); 4386 path->slots[0]--; 4387 leaf = path->nodes[0]; 4388 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4389 4390 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { 4391 /* inode does not have any extents */ 4392 hole_start = 0; 4393 hole_size = i_size; 4394 } else { 4395 struct btrfs_file_extent_item *extent; 4396 u64 len; 4397 4398 /* 4399 * If there's an extent beyond i_size, an explicit hole was 4400 * already inserted by copy_items(). 4401 */ 4402 if (key.offset >= i_size) 4403 return 0; 4404 4405 extent = btrfs_item_ptr(leaf, path->slots[0], 4406 struct btrfs_file_extent_item); 4407 4408 if (btrfs_file_extent_type(leaf, extent) == 4409 BTRFS_FILE_EXTENT_INLINE) { 4410 len = btrfs_file_extent_inline_len(leaf, 4411 path->slots[0], 4412 extent); 4413 ASSERT(len == i_size); 4414 return 0; 4415 } 4416 4417 len = btrfs_file_extent_num_bytes(leaf, extent); 4418 /* Last extent goes beyond i_size, no need to log a hole. */ 4419 if (key.offset + len > i_size) 4420 return 0; 4421 hole_start = key.offset + len; 4422 hole_size = i_size - hole_start; 4423 } 4424 btrfs_release_path(path); 4425 4426 /* Last extent ends at i_size. */ 4427 if (hole_size == 0) 4428 return 0; 4429 4430 hole_size = ALIGN(hole_size, root->sectorsize); 4431 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, 4432 hole_size, 0, hole_size, 0, 0, 0); 4433 return ret; 4434 } 4435 4436 /* 4437 * When we are logging a new inode X, check if it doesn't have a reference that 4438 * matches the reference from some other inode Y created in a past transaction 4439 * and that was renamed in the current transaction.
If we don't do this, then at 4440 * log replay time we can lose inode Y (and all its files if it's a directory): 4441 * 4442 * mkdir /mnt/x 4443 * echo "hello world" > /mnt/x/foobar 4444 * sync 4445 * mv /mnt/x /mnt/y 4446 * mkdir /mnt/x # or touch /mnt/x 4447 * xfs_io -c fsync /mnt/x 4448 * <power fail> 4449 * mount fs, trigger log replay 4450 * 4451 * After the log replay procedure, we would lose the first directory and all its 4452 * files (file foobar). 4453 * For the case where inode Y is not a directory we simply end up losing it: 4454 * 4455 * echo "123" > /mnt/foo 4456 * sync 4457 * mv /mnt/foo /mnt/bar 4458 * echo "abc" > /mnt/foo 4459 * xfs_io -c fsync /mnt/foo 4460 * <power fail> 4461 * 4462 * We also need this for cases where a snapshot entry is replaced by some other 4463 * entry (file or directory) otherwise we end up with an unreplayable log due to 4464 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 4465 * if it were a regular entry: 4466 * 4467 * mkdir /mnt/x 4468 * btrfs subvolume snapshot /mnt /mnt/x/snap 4469 * btrfs subvolume delete /mnt/x/snap 4470 * rmdir /mnt/x 4471 * mkdir /mnt/x 4472 * fsync /mnt/x or fsync some new file inside it 4473 * <power fail> 4474 * 4475 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 4476 * the same transaction. 4477 */ 4478 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4479 const int slot, 4480 const struct btrfs_key *key, 4481 struct inode *inode, 4482 u64 *other_ino) 4483 { 4484 int ret; 4485 struct btrfs_path *search_path; 4486 char *name = NULL; 4487 u32 name_len = 0; 4488 u32 item_size = btrfs_item_size_nr(eb, slot); 4489 u32 cur_offset = 0; 4490 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4491 4492 search_path = btrfs_alloc_path(); 4493 if (!search_path) 4494 return -ENOMEM; 4495 search_path->search_commit_root = 1; 4496 search_path->skip_locking = 1; 4497 4498 while (cur_offset < item_size) { 4499 u64 parent; 4500 u32 this_name_len; 4501 u32 this_len; 4502 unsigned long name_ptr; 4503 struct btrfs_dir_item *di; 4504 4505 if (key->type == BTRFS_INODE_REF_KEY) { 4506 struct btrfs_inode_ref *iref; 4507 4508 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4509 parent = key->offset; 4510 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4511 name_ptr = (unsigned long)(iref + 1); 4512 this_len = sizeof(*iref) + this_name_len; 4513 } else { 4514 struct btrfs_inode_extref *extref; 4515 4516 extref = (struct btrfs_inode_extref *)(ptr + 4517 cur_offset); 4518 parent = btrfs_inode_extref_parent(eb, extref); 4519 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4520 name_ptr = (unsigned long)&extref->name; 4521 this_len = sizeof(*extref) + this_name_len; 4522 } 4523 4524 if (this_name_len > name_len) { 4525 char *new_name; 4526 4527 new_name = krealloc(name, this_name_len, GFP_NOFS); 4528 if (!new_name) { 4529 ret = -ENOMEM; 4530 goto out; 4531 } 4532 name_len = this_name_len; 4533 name = new_name; 4534 } 4535 4536 read_extent_buffer(eb, name, name_ptr, this_name_len); 4537 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root, 4538 search_path, parent, 4539 name, this_name_len, 0); 4540 if (di && !IS_ERR(di)) { 4541 struct btrfs_key di_key; 4542 4543 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4544 di, &di_key); 4545 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4546 ret = 1; 4547 *other_ino = di_key.objectid; 4548 } else { 4549 ret = -EAGAIN; 4550 } 4551 goto out; 4552 } else if (IS_ERR(di)) { 4553 ret = PTR_ERR(di); 4554 goto out; 
4555 } 4556 btrfs_release_path(search_path); 4557 4558 cur_offset += this_len; 4559 } 4560 ret = 0; 4561 out: 4562 btrfs_free_path(search_path); 4563 kfree(name); 4564 return ret; 4565 } 4566 4567 /* log a single inode in the tree log. 4568 * At least one parent directory for this inode must exist in the tree 4569 * or be logged already. 4570 * 4571 * Any items from this inode changed by the current transaction are copied 4572 * to the log tree. An extra reference is taken on any extents in this 4573 * file, allowing us to avoid a whole pile of corner cases around logging 4574 * blocks that have been removed from the tree. 4575 * 4576 * See LOG_INODE_ALL and related defines for a description of what inode_only 4577 * does. 4578 * 4579 * This handles both files and directories. 4580 */ 4581 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 4582 struct btrfs_root *root, struct inode *inode, 4583 int inode_only, 4584 const loff_t start, 4585 const loff_t end, 4586 struct btrfs_log_ctx *ctx) 4587 { 4588 struct btrfs_path *path; 4589 struct btrfs_path *dst_path; 4590 struct btrfs_key min_key; 4591 struct btrfs_key max_key; 4592 struct btrfs_root *log = root->log_root; 4593 struct extent_buffer *src = NULL; 4594 LIST_HEAD(logged_list); 4595 u64 last_extent = 0; 4596 int err = 0; 4597 int ret; 4598 int nritems; 4599 int ins_start_slot = 0; 4600 int ins_nr; 4601 bool fast_search = false; 4602 u64 ino = btrfs_ino(inode); 4603 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4604 u64 logged_isize = 0; 4605 bool need_log_inode_item = true; 4606 4607 path = btrfs_alloc_path(); 4608 if (!path) 4609 return -ENOMEM; 4610 dst_path = btrfs_alloc_path(); 4611 if (!dst_path) { 4612 btrfs_free_path(path); 4613 return -ENOMEM; 4614 } 4615 4616 min_key.objectid = ino; 4617 min_key.type = BTRFS_INODE_ITEM_KEY; 4618 min_key.offset = 0; 4619 4620 max_key.objectid = ino; 4621 4622 4623 /* today the code can only do partial logging of directories */ 4624 if (S_ISDIR(inode->i_mode) || 4625 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4626 &BTRFS_I(inode)->runtime_flags) && 4627 inode_only == LOG_INODE_EXISTS)) 4628 max_key.type = BTRFS_XATTR_ITEM_KEY; 4629 else 4630 max_key.type = (u8)-1; 4631 max_key.offset = (u64)-1; 4632 4633 /* 4634 * Only run delayed items if we are a dir or a new file. 4635 * Otherwise commit the delayed inode only, which is needed in 4636 * order for the log replay code to mark inodes for link count 4637 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 4638 */ 4639 if (S_ISDIR(inode->i_mode) || 4640 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) 4641 ret = btrfs_commit_inode_delayed_items(trans, inode); 4642 else 4643 ret = btrfs_commit_inode_delayed_inode(inode); 4644 4645 if (ret) { 4646 btrfs_free_path(path); 4647 btrfs_free_path(dst_path); 4648 return ret; 4649 } 4650 4651 mutex_lock(&BTRFS_I(inode)->log_mutex); 4652 4653 /* 4654 * a brute force approach to making sure we get the most uptodate 4655 * copies of everything. 4656 */ 4657 if (S_ISDIR(inode->i_mode)) { 4658 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 4659 4660 if (inode_only == LOG_INODE_EXISTS) 4661 max_key_type = BTRFS_XATTR_ITEM_KEY; 4662 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 4663 } else { 4664 if (inode_only == LOG_INODE_EXISTS) { 4665 /* 4666 * Make sure the new inode item we write to the log has 4667 * the same isize as the current one (if it exists). 
* This is necessary to prevent data loss after log 4669 * replay, and also to prevent doing a wrong expanding 4670 * truncate - e.g. create file, write 4Kb into offset 4671 * 0, fsync, write 4Kb into offset 4096, add hard link, 4672 * fsync some other file (to sync the log), power fail - if 4673 * we use the inode's current i_size, after log replay 4674 * we get an 8Kb file, with the last 4Kb extent as a hole 4675 * (zeroes), as if an expanding truncate happened, 4676 * instead of getting a file of only 4Kb. 4677 */ 4678 err = logged_inode_size(log, inode, path, 4679 &logged_isize); 4680 if (err) 4681 goto out_unlock; 4682 } 4683 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4684 &BTRFS_I(inode)->runtime_flags)) { 4685 if (inode_only == LOG_INODE_EXISTS) { 4686 max_key.type = BTRFS_XATTR_ITEM_KEY; 4687 ret = drop_objectid_items(trans, log, path, ino, 4688 max_key.type); 4689 } else { 4690 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4691 &BTRFS_I(inode)->runtime_flags); 4692 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 4693 &BTRFS_I(inode)->runtime_flags); 4694 while (1) { 4695 ret = btrfs_truncate_inode_items(trans, 4696 log, inode, 0, 0); 4697 if (ret != -EAGAIN) 4698 break; 4699 } 4700 } 4701 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 4702 &BTRFS_I(inode)->runtime_flags) || 4703 inode_only == LOG_INODE_EXISTS) { 4704 if (inode_only == LOG_INODE_ALL) 4705 fast_search = true; 4706 max_key.type = BTRFS_XATTR_ITEM_KEY; 4707 ret = drop_objectid_items(trans, log, path, ino, 4708 max_key.type); 4709 } else { 4710 if (inode_only == LOG_INODE_ALL) 4711 fast_search = true; 4712 goto log_extents; 4713 } 4714 4715 } 4716 if (ret) { 4717 err = ret; 4718 goto out_unlock; 4719 } 4720 4721 while (1) { 4722 ins_nr = 0; 4723 ret = btrfs_search_forward(root, &min_key, 4724 path, trans->transid); 4725 if (ret < 0) { 4726 err = ret; 4727 goto out_unlock; 4728 } 4729 if (ret != 0) 4730 break; 4731 again: 4732 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 4733 if (min_key.objectid != ino) 4734 break; 4735 if (min_key.type > max_key.type) 4736 break; 4737 4738 if (min_key.type == BTRFS_INODE_ITEM_KEY) 4739 need_log_inode_item = false; 4740 4741 if ((min_key.type == BTRFS_INODE_REF_KEY || 4742 min_key.type == BTRFS_INODE_EXTREF_KEY) && 4743 BTRFS_I(inode)->generation == trans->transid) { 4744 u64 other_ino = 0; 4745 4746 ret = btrfs_check_ref_name_override(path->nodes[0], 4747 path->slots[0], 4748 &min_key, inode, 4749 &other_ino); 4750 if (ret < 0) { 4751 err = ret; 4752 goto out_unlock; 4753 } else if (ret > 0 && ctx && 4754 other_ino != btrfs_ino(ctx->inode)) { 4755 struct btrfs_key inode_key; 4756 struct inode *other_inode; 4757 4758 if (ins_nr > 0) { 4759 ins_nr++; 4760 } else { 4761 ins_nr = 1; 4762 ins_start_slot = path->slots[0]; 4763 } 4764 ret = copy_items(trans, inode, dst_path, path, 4765 &last_extent, ins_start_slot, 4766 ins_nr, inode_only, 4767 logged_isize); 4768 if (ret < 0) { 4769 err = ret; 4770 goto out_unlock; 4771 } 4772 ins_nr = 0; 4773 btrfs_release_path(path); 4774 inode_key.objectid = other_ino; 4775 inode_key.type = BTRFS_INODE_ITEM_KEY; 4776 inode_key.offset = 0; 4777 other_inode = btrfs_iget(root->fs_info->sb, 4778 &inode_key, root, 4779 NULL); 4780 /* 4781 * If the other inode that had a conflicting dir 4782 * entry was deleted in the current transaction, 4783 * we don't need to do more work nor fall back to 4784 * a transaction commit.
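 * (That is the -ENOENT case from btrfs_iget() handled right below.)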
4785 */ 4786 if (IS_ERR(other_inode) && 4787 PTR_ERR(other_inode) == -ENOENT) { 4788 goto next_key; 4789 } else if (IS_ERR(other_inode)) { 4790 err = PTR_ERR(other_inode); 4791 goto out_unlock; 4792 } 4793 /* 4794 * We are safe logging the other inode without 4795 * acquiring its i_mutex as long as we log with 4796 * the LOG_INODE_EXISTS mode. We're safe against 4797 * concurrent renames of the other inode as well 4798 * because during a rename we pin the log and 4799 * update the log with the new name before we 4800 * unpin it. 4801 */ 4802 err = btrfs_log_inode(trans, root, other_inode, 4803 LOG_INODE_EXISTS, 4804 0, LLONG_MAX, ctx); 4805 iput(other_inode); 4806 if (err) 4807 goto out_unlock; 4808 else 4809 goto next_key; 4810 } 4811 } 4812 4813 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 4814 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 4815 if (ins_nr == 0) 4816 goto next_slot; 4817 ret = copy_items(trans, inode, dst_path, path, 4818 &last_extent, ins_start_slot, 4819 ins_nr, inode_only, logged_isize); 4820 if (ret < 0) { 4821 err = ret; 4822 goto out_unlock; 4823 } 4824 ins_nr = 0; 4825 if (ret) { 4826 btrfs_release_path(path); 4827 continue; 4828 } 4829 goto next_slot; 4830 } 4831 4832 src = path->nodes[0]; 4833 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 4834 ins_nr++; 4835 goto next_slot; 4836 } else if (!ins_nr) { 4837 ins_start_slot = path->slots[0]; 4838 ins_nr = 1; 4839 goto next_slot; 4840 } 4841 4842 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4843 ins_start_slot, ins_nr, inode_only, 4844 logged_isize); 4845 if (ret < 0) { 4846 err = ret; 4847 goto out_unlock; 4848 } 4849 if (ret) { 4850 ins_nr = 0; 4851 btrfs_release_path(path); 4852 continue; 4853 } 4854 ins_nr = 1; 4855 ins_start_slot = path->slots[0]; 4856 next_slot: 4857 4858 nritems = btrfs_header_nritems(path->nodes[0]); 4859 path->slots[0]++; 4860 if (path->slots[0] < nritems) { 4861 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 4862 path->slots[0]); 4863 goto again; 4864 } 4865 if (ins_nr) { 4866 ret = copy_items(trans, inode, dst_path, path, 4867 &last_extent, ins_start_slot, 4868 ins_nr, inode_only, logged_isize); 4869 if (ret < 0) { 4870 err = ret; 4871 goto out_unlock; 4872 } 4873 ret = 0; 4874 ins_nr = 0; 4875 } 4876 btrfs_release_path(path); 4877 next_key: 4878 if (min_key.offset < (u64)-1) { 4879 min_key.offset++; 4880 } else if (min_key.type < max_key.type) { 4881 min_key.type++; 4882 min_key.offset = 0; 4883 } else { 4884 break; 4885 } 4886 } 4887 if (ins_nr) { 4888 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4889 ins_start_slot, ins_nr, inode_only, 4890 logged_isize); 4891 if (ret < 0) { 4892 err = ret; 4893 goto out_unlock; 4894 } 4895 ret = 0; 4896 ins_nr = 0; 4897 } 4898 4899 btrfs_release_path(path); 4900 btrfs_release_path(dst_path); 4901 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 4902 if (err) 4903 goto out_unlock; 4904 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 4905 btrfs_release_path(path); 4906 btrfs_release_path(dst_path); 4907 err = btrfs_log_trailing_hole(trans, root, inode, path); 4908 if (err) 4909 goto out_unlock; 4910 } 4911 log_extents: 4912 btrfs_release_path(path); 4913 btrfs_release_path(dst_path); 4914 if (need_log_inode_item) { 4915 err = log_inode_item(trans, log, dst_path, inode); 4916 if (err) 4917 goto out_unlock; 4918 } 4919 if (fast_search) { 4920 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 4921 &logged_list, ctx, start, end); 4922 if (ret) { 4923 err = 
ret; 4924 goto out_unlock; 4925 } 4926 } else if (inode_only == LOG_INODE_ALL) { 4927 struct extent_map *em, *n; 4928 4929 write_lock(&em_tree->lock); 4930 /* 4931 * We can't just remove every em if we're called for a ranged 4932 * fsync - that is, one that doesn't cover the whole possible 4933 * file range (0 to LLONG_MAX). This is because we can have 4934 * em's that fall outside the range we're logging and therefore 4935 * their ordered operations haven't completed yet 4936 * (btrfs_finish_ordered_io() not invoked yet). This means we 4937 * didn't get their respective file extent item in the fs/subvol 4938 * tree yet, and need to let the next fast fsync (one which 4939 * consults the list of modified extent maps) find the em so 4940 * that it logs a matching file extent item and waits for the 4941 * respective ordered operation to complete (if it's still 4942 * running). 4943 * 4944 * Removing every em outside the range we're logging would make 4945 * the next fast fsync not log their matching file extent items, 4946 * therefore making us lose data after a log replay. 4947 */ 4948 list_for_each_entry_safe(em, n, &em_tree->modified_extents, 4949 list) { 4950 const u64 mod_end = em->mod_start + em->mod_len - 1; 4951 4952 if (em->mod_start >= start && mod_end <= end) 4953 list_del_init(&em->list); 4954 } 4955 write_unlock(&em_tree->lock); 4956 } 4957 4958 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { 4959 ret = log_directory_changes(trans, root, inode, path, dst_path, 4960 ctx); 4961 if (ret) { 4962 err = ret; 4963 goto out_unlock; 4964 } 4965 } 4966 4967 spin_lock(&BTRFS_I(inode)->lock); 4968 BTRFS_I(inode)->logged_trans = trans->transid; 4969 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; 4970 spin_unlock(&BTRFS_I(inode)->lock); 4971 out_unlock: 4972 if (unlikely(err)) 4973 btrfs_put_logged_extents(&logged_list); 4974 else 4975 btrfs_submit_logged_extents(&logged_list, log); 4976 mutex_unlock(&BTRFS_I(inode)->log_mutex); 4977 4978 btrfs_free_path(path); 4979 btrfs_free_path(dst_path); 4980 return err; 4981 } 4982 4983 /* 4984 * Check if we must fall back to a transaction commit when logging an inode. 4985 * This must be called after logging the inode and is used only in the context 4986 * when fsyncing an inode requires logging some other inode - in which case we 4987 * can't lock the i_mutex of each other inode we need to log, as that can lead 4988 * to deadlocks with concurrent fsync against other inodes (as we can log 4989 * inodes up or down in the hierarchy) or with rename operations, for example. 4990 * So we take the log_mutex of the inode after we have logged it and then 4991 * check its last_unlink_trans value. This is safe because any task setting 4992 * last_unlink_trans must take the log_mutex, and must do so before the actual 4993 * unlink operation; so if we do this check before a concurrent task sets 4994 * last_unlink_trans, it means we've logged a consistent version/state of all 4995 * the inode items. Otherwise we are not sure and must do a transaction commit 4996 * (the concurrent task might have only updated last_unlink_trans before we 4997 * logged the inode, or it might have also done the unlink).
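 *
 * Returns true when the caller must fall back to a full transaction
 * commit; in that case any log commit for this transaction is also
 * marked as needing to be a full commit.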
4998 */ 4999 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 5000 struct inode *inode) 5001 { 5002 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 5003 bool ret = false; 5004 5005 mutex_lock(&BTRFS_I(inode)->log_mutex); 5006 if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) { 5007 /* 5008 * Make sure any commits to the log are forced to be full 5009 * commits. 5010 */ 5011 btrfs_set_log_full_commit(fs_info, trans); 5012 ret = true; 5013 } 5014 mutex_unlock(&BTRFS_I(inode)->log_mutex); 5015 5016 return ret; 5017 } 5018 5019 /* 5020 * follow the dentry parent pointers up the chain and see if any 5021 * of the directories in it require a full commit before they can 5022 * be logged. Returns zero if nothing special needs to be done or 1 if 5023 * a full commit is required. 5024 */ 5025 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 5026 struct inode *inode, 5027 struct dentry *parent, 5028 struct super_block *sb, 5029 u64 last_committed) 5030 { 5031 int ret = 0; 5032 struct dentry *old_parent = NULL; 5033 struct inode *orig_inode = inode; 5034 5035 /* 5036 * for a regular file, if its inode is already on disk, we don't 5037 * have to worry about the parents at all. This is because 5038 * we can use the last_unlink_trans field to record renames 5039 * and other fun in this file. 5040 */ 5041 if (S_ISREG(inode->i_mode) && 5042 BTRFS_I(inode)->generation <= last_committed && 5043 BTRFS_I(inode)->last_unlink_trans <= last_committed) 5044 goto out; 5045 5046 if (!S_ISDIR(inode->i_mode)) { 5047 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5048 goto out; 5049 inode = d_inode(parent); 5050 } 5051 5052 while (1) { 5053 /* 5054 * If we are logging a directory then we start with our inode, 5055 * not our parent's inode, so we need to skip setting the 5056 * logged_trans so that further down in the log code we don't 5057 * think this inode has already been logged. 5058 */ 5059 if (inode != orig_inode) 5060 BTRFS_I(inode)->logged_trans = trans->transid; 5061 smp_mb(); 5062 5063 if (btrfs_must_commit_transaction(trans, inode)) { 5064 ret = 1; 5065 break; 5066 } 5067 5068 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5069 break; 5070 5071 if (IS_ROOT(parent)) { 5072 inode = d_inode(parent); 5073 if (btrfs_must_commit_transaction(trans, inode)) 5074 ret = 1; 5075 break; 5076 } 5077 5078 parent = dget_parent(parent); 5079 dput(old_parent); 5080 old_parent = parent; 5081 inode = d_inode(parent); 5082 5083 } 5084 dput(old_parent); 5085 out: 5086 return ret; 5087 } 5088 5089 struct btrfs_dir_list { 5090 u64 ino; 5091 struct list_head list; 5092 }; 5093 5094 /* 5095 * Log the inodes of the new dentries of a directory. See log_dir_items() for 5096 * details about why it is needed. 5097 * This is a recursive operation - if an existing dentry corresponds to a 5098 * directory, that directory's new entries are logged too (same behaviour as 5099 * ext3/4, xfs, f2fs, reiserfs, nilfs2).
Note that when logging the inodes 5100 * the dentries point to we do not lock their i_mutex, otherwise lockdep 5101 * complains about the following circular lock dependency / possible deadlock: 5102 * 5103 * CPU0 CPU1 5104 * ---- ---- 5105 * lock(&type->i_mutex_dir_key#3/2); 5106 * lock(sb_internal#2); 5107 * lock(&type->i_mutex_dir_key#3/2); 5108 * lock(&sb->s_type->i_mutex_key#14); 5109 * 5110 * Where sb_internal is the lock (a counter that works as a lock) acquired by 5111 * sb_start_intwrite() in btrfs_start_transaction(). 5112 * Not locking i_mutex of the inodes is still safe because: 5113 * 5114 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible 5115 * that while logging the inode new references (names) are added or removed 5116 * from the inode, leaving the logged inode item with a link count that does 5117 * not match the number of logged inode reference items. This is fine because 5118 * at log replay time we compute the real number of links and correct the 5119 * link count in the inode item (see replay_one_buffer() and 5120 * link_to_fixup_dir()); 5121 * 5122 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that 5123 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and 5124 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item 5125 * has a size that doesn't match the sum of the lengths of all the logged 5126 * names. This does not result in a problem because if a dir_item key is 5127 * logged but its matching dir_index key is not logged, at log replay time we 5128 * don't use it to replay the respective name (see replay_one_name()). On the 5129 * other hand if only the dir_index key ends up being logged, the respective 5130 * name is added to the fs/subvol tree with both the dir_item and dir_index 5131 * keys created (see replay_one_name()). 5132 * The directory's inode item with a wrong i_size is not a problem as well, 5133 * since we don't use it at log replay time to set the i_size in the inode 5134 * item of the fs/subvol tree (see overwrite_item()). 
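 *
 * Note that the implementation below is iterative rather than literally
 * recursive: dir_list acts as a FIFO queue of directories still to be
 * processed, so deep directory trees are walked without growing the
 * kernel stack.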
5135 */ 5136 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5137 struct btrfs_root *root, 5138 struct inode *start_inode, 5139 struct btrfs_log_ctx *ctx) 5140 { 5141 struct btrfs_root *log = root->log_root; 5142 struct btrfs_path *path; 5143 LIST_HEAD(dir_list); 5144 struct btrfs_dir_list *dir_elem; 5145 int ret = 0; 5146 5147 path = btrfs_alloc_path(); 5148 if (!path) 5149 return -ENOMEM; 5150 5151 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5152 if (!dir_elem) { 5153 btrfs_free_path(path); 5154 return -ENOMEM; 5155 } 5156 dir_elem->ino = btrfs_ino(start_inode); 5157 list_add_tail(&dir_elem->list, &dir_list); 5158 5159 while (!list_empty(&dir_list)) { 5160 struct extent_buffer *leaf; 5161 struct btrfs_key min_key; 5162 int nritems; 5163 int i; 5164 5165 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, 5166 list); 5167 if (ret) 5168 goto next_dir_inode; 5169 5170 min_key.objectid = dir_elem->ino; 5171 min_key.type = BTRFS_DIR_ITEM_KEY; 5172 min_key.offset = 0; 5173 again: 5174 btrfs_release_path(path); 5175 ret = btrfs_search_forward(log, &min_key, path, trans->transid); 5176 if (ret < 0) { 5177 goto next_dir_inode; 5178 } else if (ret > 0) { 5179 ret = 0; 5180 goto next_dir_inode; 5181 } 5182 5183 process_leaf: 5184 leaf = path->nodes[0]; 5185 nritems = btrfs_header_nritems(leaf); 5186 for (i = path->slots[0]; i < nritems; i++) { 5187 struct btrfs_dir_item *di; 5188 struct btrfs_key di_key; 5189 struct inode *di_inode; 5190 struct btrfs_dir_list *new_dir_elem; 5191 int log_mode = LOG_INODE_EXISTS; 5192 int type; 5193 5194 btrfs_item_key_to_cpu(leaf, &min_key, i); 5195 if (min_key.objectid != dir_elem->ino || 5196 min_key.type != BTRFS_DIR_ITEM_KEY) 5197 goto next_dir_inode; 5198 5199 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); 5200 type = btrfs_dir_type(leaf, di); 5201 if (btrfs_dir_transid(leaf, di) < trans->transid && 5202 type != BTRFS_FT_DIR) 5203 continue; 5204 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5205 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5206 continue; 5207 5208 di_inode = btrfs_iget(root->fs_info->sb, &di_key, 5209 root, NULL); 5210 if (IS_ERR(di_inode)) { 5211 ret = PTR_ERR(di_inode); 5212 goto next_dir_inode; 5213 } 5214 5215 if (btrfs_inode_in_log(di_inode, trans->transid)) { 5216 iput(di_inode); 5217 continue; 5218 } 5219 5220 ctx->log_new_dentries = false; 5221 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 5222 log_mode = LOG_INODE_ALL; 5223 btrfs_release_path(path); 5224 ret = btrfs_log_inode(trans, root, di_inode, 5225 log_mode, 0, LLONG_MAX, ctx); 5226 if (!ret && 5227 btrfs_must_commit_transaction(trans, di_inode)) 5228 ret = 1; 5229 iput(di_inode); 5230 if (ret) 5231 goto next_dir_inode; 5232 if (ctx->log_new_dentries) { 5233 new_dir_elem = kmalloc(sizeof(*new_dir_elem), 5234 GFP_NOFS); 5235 if (!new_dir_elem) { 5236 ret = -ENOMEM; 5237 goto next_dir_inode; 5238 } 5239 new_dir_elem->ino = di_key.objectid; 5240 list_add_tail(&new_dir_elem->list, &dir_list); 5241 } 5242 break; 5243 } 5244 if (i == nritems) { 5245 ret = btrfs_next_leaf(log, path); 5246 if (ret < 0) { 5247 goto next_dir_inode; 5248 } else if (ret > 0) { 5249 ret = 0; 5250 goto next_dir_inode; 5251 } 5252 goto process_leaf; 5253 } 5254 if (min_key.offset < (u64)-1) { 5255 min_key.offset++; 5256 goto again; 5257 } 5258 next_dir_inode: 5259 list_del(&dir_elem->list); 5260 kfree(dir_elem); 5261 } 5262 5263 btrfs_free_path(path); 5264 return ret; 5265 } 5266 5267 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, 5268 struct inode 
*inode, 5269 struct btrfs_log_ctx *ctx) 5270 { 5271 int ret; 5272 struct btrfs_path *path; 5273 struct btrfs_key key; 5274 struct btrfs_root *root = BTRFS_I(inode)->root; 5275 const u64 ino = btrfs_ino(inode); 5276 5277 path = btrfs_alloc_path(); 5278 if (!path) 5279 return -ENOMEM; 5280 path->skip_locking = 1; 5281 path->search_commit_root = 1; 5282 5283 key.objectid = ino; 5284 key.type = BTRFS_INODE_REF_KEY; 5285 key.offset = 0; 5286 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5287 if (ret < 0) 5288 goto out; 5289 5290 while (true) { 5291 struct extent_buffer *leaf = path->nodes[0]; 5292 int slot = path->slots[0]; 5293 u32 cur_offset = 0; 5294 u32 item_size; 5295 unsigned long ptr; 5296 5297 if (slot >= btrfs_header_nritems(leaf)) { 5298 ret = btrfs_next_leaf(root, path); 5299 if (ret < 0) 5300 goto out; 5301 else if (ret > 0) 5302 break; 5303 continue; 5304 } 5305 5306 btrfs_item_key_to_cpu(leaf, &key, slot); 5307 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 5308 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 5309 break; 5310 5311 item_size = btrfs_item_size_nr(leaf, slot); 5312 ptr = btrfs_item_ptr_offset(leaf, slot); 5313 while (cur_offset < item_size) { 5314 struct btrfs_key inode_key; 5315 struct inode *dir_inode; 5316 5317 inode_key.type = BTRFS_INODE_ITEM_KEY; 5318 inode_key.offset = 0; 5319 5320 if (key.type == BTRFS_INODE_EXTREF_KEY) { 5321 struct btrfs_inode_extref *extref; 5322 5323 extref = (struct btrfs_inode_extref *) 5324 (ptr + cur_offset); 5325 inode_key.objectid = btrfs_inode_extref_parent( 5326 leaf, extref); 5327 cur_offset += sizeof(*extref); 5328 cur_offset += btrfs_inode_extref_name_len(leaf, 5329 extref); 5330 } else { 5331 inode_key.objectid = key.offset; 5332 cur_offset = item_size; 5333 } 5334 5335 dir_inode = btrfs_iget(root->fs_info->sb, &inode_key, 5336 root, NULL); 5337 /* If parent inode was deleted, skip it. */ 5338 if (IS_ERR(dir_inode)) 5339 continue; 5340 5341 if (ctx) 5342 ctx->log_new_dentries = false; 5343 ret = btrfs_log_inode(trans, root, dir_inode, 5344 LOG_INODE_ALL, 0, LLONG_MAX, ctx); 5345 if (!ret && 5346 btrfs_must_commit_transaction(trans, dir_inode)) 5347 ret = 1; 5348 if (!ret && ctx && ctx->log_new_dentries) 5349 ret = log_new_dir_dentries(trans, root, 5350 dir_inode, ctx); 5351 iput(dir_inode); 5352 if (ret) 5353 goto out; 5354 } 5355 path->slots[0]++; 5356 } 5357 ret = 0; 5358 out: 5359 btrfs_free_path(path); 5360 return ret; 5361 } 5362 5363 /* 5364 * helper function around btrfs_log_inode to make sure newly created 5365 * parent directories also end up in the log. A minimal, inode-and-backref-only 5366 * logging is done for any parent directories that are older than 5367 * the last committed transaction. 5368 */ 5369 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 5370 struct btrfs_root *root, struct inode *inode, 5371 struct dentry *parent, 5372 const loff_t start, 5373 const loff_t end, 5374 int exists_only, 5375 struct btrfs_log_ctx *ctx) 5376 { 5377 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; 5378 struct super_block *sb; 5379 struct dentry *old_parent = NULL; 5380 int ret = 0; 5381 u64 last_committed = root->fs_info->last_trans_committed; 5382 bool log_dentries = false; 5383 struct inode *orig_inode = inode; 5384 5385 sb = inode->i_sb; 5386 5387 if (btrfs_test_opt(root->fs_info, NOTREELOG)) { 5388 ret = 1; 5389 goto end_no_trans; 5390 } 5391 5392 /* 5393 * If the previous transaction commit didn't complete, we have to do a 5394 * full commit by ourselves.
5395 */ 5396 if (root->fs_info->last_trans_log_full_commit > 5397 root->fs_info->last_trans_committed) { 5398 ret = 1; 5399 goto end_no_trans; 5400 } 5401 5402 if (root != BTRFS_I(inode)->root || 5403 btrfs_root_refs(&root->root_item) == 0) { 5404 ret = 1; 5405 goto end_no_trans; 5406 } 5407 5408 ret = check_parent_dirs_for_sync(trans, inode, parent, 5409 sb, last_committed); 5410 if (ret) 5411 goto end_no_trans; 5412 5413 if (btrfs_inode_in_log(inode, trans->transid)) { 5414 ret = BTRFS_NO_LOG_SYNC; 5415 goto end_no_trans; 5416 } 5417 5418 ret = start_log_trans(trans, root, ctx); 5419 if (ret) 5420 goto end_no_trans; 5421 5422 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); 5423 if (ret) 5424 goto end_trans; 5425 5426 /* 5427 * for a regular file, if its inode is already on disk, we don't 5428 * have to worry about the parents at all. This is because 5429 * we can use the last_unlink_trans field to record renames 5430 * and other fun in this file. 5431 */ 5432 if (S_ISREG(inode->i_mode) && 5433 BTRFS_I(inode)->generation <= last_committed && 5434 BTRFS_I(inode)->last_unlink_trans <= last_committed) { 5435 ret = 0; 5436 goto end_trans; 5437 } 5438 5439 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries) 5440 log_dentries = true; 5441 5442 /* 5443 * On unlink we must make sure all our current and old parent directory 5444 * inodes are fully logged. This is to prevent leaving dangling 5445 * directory index entries in directories that were our parents but are 5446 * not anymore. Not doing this results in the old parent directory being 5447 * impossible to delete after log replay (rmdir will always fail with 5448 * error -ENOTEMPTY). 5449 * 5450 * Example 1: 5451 * 5452 * mkdir testdir 5453 * touch testdir/foo 5454 * ln testdir/foo testdir/bar 5455 * sync 5456 * unlink testdir/bar 5457 * xfs_io -c fsync testdir/foo 5458 * <power failure> 5459 * mount fs, triggers log replay 5460 * 5461 * If we don't log the parent directory (testdir), after log replay the 5462 * directory still has an entry pointing to the file inode using the bar 5463 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and 5464 * the file inode has a link count of 1. 5465 * 5466 * Example 2: 5467 * 5468 * mkdir testdir 5469 * touch foo 5470 * ln foo testdir/foo2 5471 * ln foo testdir/foo3 5472 * sync 5473 * unlink testdir/foo3 5474 * xfs_io -c fsync foo 5475 * <power failure> 5476 * mount fs, triggers log replay 5477 * 5478 * As in the first example, after log replay the parent directory 5479 * testdir still has an entry pointing to the file inode using the foo3 5480 * name, but the file inode does not have a matching BTRFS_INODE_REF_KEY 5481 * item and has a link count of 2.
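 *
 * In both examples the fix is the same: when last_unlink_trans is more
 * recent than the last committed transaction, log all of the inode's
 * current parent directories (btrfs_log_all_parents() below).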
5482 */ 5483 if (BTRFS_I(inode)->last_unlink_trans > last_committed) { 5484 ret = btrfs_log_all_parents(trans, orig_inode, ctx); 5485 if (ret) 5486 goto end_trans; 5487 } 5488 5489 while (1) { 5490 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5491 break; 5492 5493 inode = d_inode(parent); 5494 if (root != BTRFS_I(inode)->root) 5495 break; 5496 5497 if (BTRFS_I(inode)->generation > last_committed) { 5498 ret = btrfs_log_inode(trans, root, inode, 5499 LOG_INODE_EXISTS, 5500 0, LLONG_MAX, ctx); 5501 if (ret) 5502 goto end_trans; 5503 } 5504 if (IS_ROOT(parent)) 5505 break; 5506 5507 parent = dget_parent(parent); 5508 dput(old_parent); 5509 old_parent = parent; 5510 } 5511 if (log_dentries) 5512 ret = log_new_dir_dentries(trans, root, orig_inode, ctx); 5513 else 5514 ret = 0; 5515 end_trans: 5516 dput(old_parent); 5517 if (ret < 0) { 5518 btrfs_set_log_full_commit(root->fs_info, trans); 5519 ret = 1; 5520 } 5521 5522 if (ret) 5523 btrfs_remove_log_ctx(root, ctx); 5524 btrfs_end_log_trans(root); 5525 end_no_trans: 5526 return ret; 5527 } 5528 5529 /* 5530 * it is not safe to log a dentry if the chunk root has added new 5531 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 5532 * If this returns 1, you must commit the transaction to safely get your 5533 * data on disk. 5534 */ 5535 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 5536 struct btrfs_root *root, struct dentry *dentry, 5537 const loff_t start, 5538 const loff_t end, 5539 struct btrfs_log_ctx *ctx) 5540 { 5541 struct dentry *parent = dget_parent(dentry); 5542 int ret; 5543 5544 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent, 5545 start, end, 0, ctx); 5546 dput(parent); 5547 5548 return ret; 5549 } 5550 5551 /* 5552 * should be called during mount to recover and replay any log trees 5553 * from the FS 5554 */ 5555 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) 5556 { 5557 int ret; 5558 struct btrfs_path *path; 5559 struct btrfs_trans_handle *trans; 5560 struct btrfs_key key; 5561 struct btrfs_key found_key; 5562 struct btrfs_key tmp_key; 5563 struct btrfs_root *log; 5564 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 5565 struct walk_control wc = { 5566 .process_func = process_one_buffer, 5567 .stage = 0, 5568 }; 5569 5570 path = btrfs_alloc_path(); 5571 if (!path) 5572 return -ENOMEM; 5573 5574 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 5575 5576 trans = btrfs_start_transaction(fs_info->tree_root, 0); 5577 if (IS_ERR(trans)) { 5578 ret = PTR_ERR(trans); 5579 goto error; 5580 } 5581 5582 wc.trans = trans; 5583 wc.pin = 1; 5584 5585 ret = walk_log_tree(trans, log_root_tree, &wc); 5586 if (ret) { 5587 btrfs_handle_fs_error(fs_info, ret, 5588 "Failed to pin buffers while recovering log root tree."); 5589 goto error; 5590 } 5591 5592 again: 5593 key.objectid = BTRFS_TREE_LOG_OBJECTID; 5594 key.offset = (u64)-1; 5595 key.type = BTRFS_ROOT_ITEM_KEY; 5596 5597 while (1) { 5598 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 5599 5600 if (ret < 0) { 5601 btrfs_handle_fs_error(fs_info, ret, 5602 "Couldn't find tree log root."); 5603 goto error; 5604 } 5605 if (ret > 0) { 5606 if (path->slots[0] == 0) 5607 break; 5608 path->slots[0]--; 5609 } 5610 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 5611 path->slots[0]); 5612 btrfs_release_path(path); 5613 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 5614 break; 5615 5616 log = btrfs_read_fs_root(log_root_tree, &found_key); 5617 if (IS_ERR(log)) { 5618 ret = PTR_ERR(log); 5619
btrfs_handle_fs_error(fs_info, ret, 5620 "Couldn't read tree log root."); 5621 goto error; 5622 } 5623 5624 tmp_key.objectid = found_key.offset; 5625 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 5626 tmp_key.offset = (u64)-1; 5627 5628 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 5629 if (IS_ERR(wc.replay_dest)) { 5630 ret = PTR_ERR(wc.replay_dest); 5631 free_extent_buffer(log->node); 5632 free_extent_buffer(log->commit_root); 5633 kfree(log); 5634 btrfs_handle_fs_error(fs_info, ret, 5635 "Couldn't read target root for tree log recovery."); 5636 goto error; 5637 } 5638 5639 wc.replay_dest->log_root = log; 5640 btrfs_record_root_in_trans(trans, wc.replay_dest); 5641 ret = walk_log_tree(trans, log, &wc); 5642 5643 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { 5644 ret = fixup_inode_link_counts(trans, wc.replay_dest, 5645 path); 5646 } 5647 5648 key.offset = found_key.offset - 1; 5649 wc.replay_dest->log_root = NULL; 5650 free_extent_buffer(log->node); 5651 free_extent_buffer(log->commit_root); 5652 kfree(log); 5653 5654 if (ret) 5655 goto error; 5656 5657 if (found_key.offset == 0) 5658 break; 5659 } 5660 btrfs_release_path(path); 5661 5662 /* step one is to pin it all, step two is to replay just inodes */ 5663 if (wc.pin) { 5664 wc.pin = 0; 5665 wc.process_func = replay_one_buffer; 5666 wc.stage = LOG_WALK_REPLAY_INODES; 5667 goto again; 5668 } 5669 /* step three is to replay everything */ 5670 if (wc.stage < LOG_WALK_REPLAY_ALL) { 5671 wc.stage++; 5672 goto again; 5673 } 5674 5675 btrfs_free_path(path); 5676 5677 /* step 4: commit the transaction, which also unpins the blocks */ 5678 ret = btrfs_commit_transaction(trans, fs_info->tree_root); 5679 if (ret) 5680 return ret; 5681 5682 free_extent_buffer(log_root_tree->node); 5683 log_root_tree->log_root = NULL; 5684 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 5685 kfree(log_root_tree); 5686 5687 return 0; 5688 error: 5689 if (wc.trans) 5690 btrfs_end_transaction(wc.trans, fs_info->tree_root); 5691 btrfs_free_path(path); 5692 return ret; 5693 } 5694 5695 /* 5696 * there are some corner cases where we want to force a full 5697 * commit instead of allowing a directory to be logged. 5698 * 5699 * They revolve around files that were unlinked from the directory, and 5700 * this function updates the parent directory so that a full commit is 5701 * properly done if it is fsync'd later after the unlinks are done. 5702 * 5703 * Must be called before the unlink operations (updates to the subvolume tree, 5704 * inodes, etc) are done. 5705 */ 5706 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, 5707 struct inode *dir, struct inode *inode, 5708 int for_rename) 5709 { 5710 /* 5711 * when we're logging a file, if it hasn't been renamed 5712 * or unlinked, and its inode is fully committed on disk, 5713 * we don't have to worry about walking up the directory chain 5714 * to log its parents. 5715 * 5716 * So, we use the last_unlink_trans field to put this transid 5717 * into the file. When the file is logged we check it and 5718 * don't log the parents if the file is fully on disk.
5719 */ 5720 mutex_lock(&BTRFS_I(inode)->log_mutex); 5721 BTRFS_I(inode)->last_unlink_trans = trans->transid; 5722 mutex_unlock(&BTRFS_I(inode)->log_mutex); 5723 5724 /* 5725 * if this directory was already logged any new 5726 * names for this file/dir will get recorded 5727 */ 5728 smp_mb(); 5729 if (BTRFS_I(dir)->logged_trans == trans->transid) 5730 return; 5731 5732 /* 5733 * if the inode we're about to unlink was logged, 5734 * the log will be properly updated for any new names 5735 */ 5736 if (BTRFS_I(inode)->logged_trans == trans->transid) 5737 return; 5738 5739 /* 5740 * when renaming files across directories, if the directory 5741 * we're unlinking from gets fsync'd later on, there's 5742 * no way to find the destination directory later and fsync it 5743 * properly. So, we have to be conservative and force commits 5744 * so the new name gets discovered. 5745 */ 5746 if (for_rename) 5747 goto record; 5748 5749 /* we can safely do the unlink without any special recording */ 5750 return; 5751 5752 record: 5753 mutex_lock(&BTRFS_I(dir)->log_mutex); 5754 BTRFS_I(dir)->last_unlink_trans = trans->transid; 5755 mutex_unlock(&BTRFS_I(dir)->log_mutex); 5756 } 5757 5758 /* 5759 * Make sure that if someone attempts to fsync the parent directory of a deleted 5760 * snapshot, it ends up triggering a transaction commit. This is to guarantee 5761 * that after replaying the log tree of the parent directory's root we will not 5762 * see the snapshot anymore and at log replay time we will not see any log tree 5763 * corresponding to the deleted snapshot's root, which could lead to replaying 5764 * it after replaying the log tree of the parent directory (which would replay 5765 * the snapshot delete operation). 5766 * 5767 * Must be called before the actual snapshot destroy operation (updates to the 5768 * parent root and the tree of tree roots, etc) are done. 5769 */ 5770 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, 5771 struct inode *dir) 5772 { 5773 mutex_lock(&BTRFS_I(dir)->log_mutex); 5774 BTRFS_I(dir)->last_unlink_trans = trans->transid; 5775 mutex_unlock(&BTRFS_I(dir)->log_mutex); 5776 } 5777 5778 /* 5779 * Call this after adding a new name for a file and it will properly 5780 * update the log to reflect the new name. 5781 * 5782 * It will return zero if all goes well, and it will return 1 if a 5783 * full transaction commit is required. 5784 */ 5785 int btrfs_log_new_name(struct btrfs_trans_handle *trans, 5786 struct inode *inode, struct inode *old_dir, 5787 struct dentry *parent) 5788 { 5789 struct btrfs_root *root = BTRFS_I(inode)->root; 5790 5791 /* 5792 * this will force the logging code to walk the dentry chain 5793 * up for the file 5794 */ 5795 if (S_ISREG(inode->i_mode)) 5796 BTRFS_I(inode)->last_unlink_trans = trans->transid; 5797 5798 /* 5799 * if this inode hasn't been logged and the directory we're renaming it 5800 * from hasn't been logged, we don't need to log it 5801 */ 5802 if (BTRFS_I(inode)->logged_trans <= 5803 root->fs_info->last_trans_committed && 5804 (!old_dir || BTRFS_I(old_dir)->logged_trans <= 5805 root->fs_info->last_trans_committed)) 5806 return 0; 5807 5808 return btrfs_log_inode_parent(trans, root, inode, parent, 0, 5809 LLONG_MAX, 1, NULL); 5810 } 5811 5812
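/*
 * Example caller pattern (a sketch only, not copied from an actual call
 * site): after a link or rename adds a new name for 'inode', a caller
 * is expected to do something along the lines of
 *
 *	ret = btrfs_log_new_name(trans, inode, old_dir, parent);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);
 *
 * since, as described above, a return value of 1 means the new name
 * cannot be captured by the log alone and a full transaction commit is
 * required.
 */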