// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

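/*
 * Note on locking: tree->last caches the node returned by the most recent
 * lookup so that repeated searches at the same offset can skip the rbtree
 * walk entirely.  Both the cache and the tree itself are protected by
 * tree->lock, which every caller below takes (with IRQs disabled, since
 * the completion paths run from bio end_io context).
 */
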
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, len,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

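/*
 * The three wrappers below differ only in the dio and compress_type
 * arguments they forward to __btrfs_add_ordered_extent().  A buffered
 * write path would typically pin a freshly allocated range with
 * something like the following (an illustrative sketch only -- the real
 * callers live in inode.c, and the variable names here are made up):
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, disk_size, 0);
 *	if (ret)
 *		goto out_free_reserved;
 *
 * Passing 0 as the type relies on the IO_DONE/COMPLETE check above, so
 * no type flag is set on the new entry.
 */
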
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

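/*
 * The sums queued above stay on entry->list until the ordered extent is
 * finished; btrfs_put_ordered_extent() kvfree()s whatever is still on the
 * list when the last reference is dropped, so nothing leaks if the extent
 * errors out before its checksums reach the csum tree.
 */
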
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

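/*
 * Note the inverted convention inside the helpers above and below: ret
 * stays 1 while there is nothing to finish, and only the caller that wins
 * the BTRFS_ORDERED_IO_DONE test_and_set_bit() sees ret == 0, so
 * "return ret == 0" reports true exactly once per ordered extent.
 */
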
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

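/*
 * The inode reference taken with igrab() at allocation time is released
 * via btrfs_add_delayed_iput() rather than a direct iput() because the
 * final reference on an ordered extent can be dropped from bio completion
 * context, where iput() (which may sleep) is not safe.
 */
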
/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

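/*
 * Splicing root->ordered_extents onto a private list above lets the loop
 * drop ordered_extent_lock while queueing flush work; entries outside the
 * requested range are parked on 'skipped', and both lists are spliced back
 * at the end, so no extent is ever lost from the root's list.
 */
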
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

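/*
 * Direct IO extents (BTRFS_ORDERED_DIRECT) are skipped by the
 * filemap_fdatawrite_range() call above because their data never goes
 * through the page cache, so there are no dirty pages to flush; waiting
 * for BTRFS_ORDERED_COMPLETE is all that is needed for them.
 */
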
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

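/*
 * All three lookup helpers above return their entry with an extra
 * reference taken under tree->lock; the caller is responsible for
 * dropping it with btrfs_put_ordered_extent() when done, as the loop in
 * btrfs_wait_ordered_range() does.
 */
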
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this pending
		 * i_size update in the blocking ordered extent, otherwise
		 * the real i_size would be lost.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @tree:         IO tree used for locking out other users of the range
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range.  It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent in the range remains pending.
 */
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
					struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

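/*
 * Typical usage is lock, operate, unlock (an illustrative sketch only --
 * actual callers pass their own io_tree and may keep the cached_state):
 *
 *	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode,
 *					   start, end, NULL);
 *	[ range is now locked with no ordered extents pending ]
 *	unlock_extent_cached(&inode->io_tree, start, end, NULL);
 */
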
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}