// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
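/*
 * Worked example (illustration only): an entry with file_offset == 4096
 * and len == 4096 covers the half-open byte range [4096, 8192).  So
 * offset_in_entry(entry, 8191) returns 1, offset_in_entry(entry, 8192)
 * returns 0, and range_overlaps(entry, 8192, 4096) returns 0 because the
 * two ranges merely touch at 8192 without intersecting.
 */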
/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, len,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}
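/*
 * Typical lifecycle, sketched as a hypothetical caller (illustration only,
 * not a verbatim copy of any btrfs code path; BTRFS_ORDERED_REGULAR is
 * assumed as the type bit for a plain COW write):
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, start, len, len,
 *				       BTRFS_ORDERED_REGULAR);
 *	if (ret)
 *		return ret;
 *	...submit the bios covering [file_offset, file_offset + len)...
 *
 *	(later, from the endio path, once all IO for the range has finished)
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, file_offset,
 *					   len, uptodate)) {
 *		...insert checksums and the file extent item...
 *		btrfs_remove_ordered_extent(inode, ordered);
 *		btrfs_put_ordered_extent(ordered);  (ref from dec_test)
 *		btrfs_put_ordered_extent(ordered);  (ref owned by the tree)
 *	}
 */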
/*
 * This is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
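/*
 * Because *file_offset is advanced past whatever range was accounted, a
 * caller finishing IO that spans several ordered extents can walk them
 * forward.  A hypothetical sketch (illustration only, with made-up locals
 * @cur, @end and @uptodate):
 *
 *	u64 cur = start, end = start + total_len;
 *
 *	while (cur < end) {
 *		struct btrfs_ordered_extent *ordered = NULL;
 *		u64 last = cur;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&cur, end - cur, uptodate)) {
 *			...finish @ordered, then drop the ref we were given...
 *			btrfs_put_ordered_extent(ordered);
 *		}
 *		(if nothing was accounted, no ordered extent covers @cur)
 *		if (cur == last)
 *			break;
 *	}
 */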
/*
 * This is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
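/*
 * Reference ownership, summarized (informational note): the tree holds the
 * single reference taken in __btrfs_add_ordered_extent(); the dec_test
 * helpers above and the lookup helpers below each give their caller an
 * extra reference via the cached/returned entry.  Every such reference must
 * eventually be dropped with btrfs_put_ordered_extent().  Note that
 * btrfs_remove_ordered_extent() below deliberately drops no references;
 * whoever removes the entry is expected to put the tree's reference too.
 */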
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
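/*
 * For example (hypothetical invocation, for illustration): passing
 * nr == U64_MAX and a range spanning the whole address space waits on
 * every ordered extent currently tracked in the root:
 *
 *	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 *
 * The return value is the number of ordered extents that were queued for
 * flushing, which btrfs_wait_ordered_roots() below uses to decrement its
 * remaining-work budget.
 */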
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
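/*
 * Hypothetical lookup pattern (illustration only): a successful lookup
 * returns with an extra reference that the caller must drop.
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), pos, nr_bytes);
 *	if (ordered) {
 *		(the range overlaps a pending ordered extent, wait for it)
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */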
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this undealt
		 * i_size.  Or we will not know the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @tree:         IO tree used for locking out other users of the range
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range.  It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
					struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}