// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the node that was already in the
 * tree at that offset otherwise.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree; if it can't be found, return the
 * node with the first lesser offset via @prev_ret.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Find the first ordered struct that has this offset, otherwise the first
 * one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

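/*
 * Illustrative example (not in the original source): the ordered tree holds
 * non-overlapping byte ranges keyed by file_offset.  With entries covering
 * [0, 4K) and [8K, 12K), tree_search() for offset 5K finds no exact match
 * and falls back to the [0, 4K) node, the one with the closest lesser
 * offset.  tree->last caches the most recently returned node, so repeated
 * lookups that land inside the same extent skip the rbtree walk entirely.
 */
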
/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;
	entry->disk = NULL;
	entry->partno = (u8)-1;

	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED);
	set_bit(type, &entry->flags);

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* One ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset, &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int compress_type)
{
	ASSERT(compress_type != BTRFS_COMPRESS_NONE);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes,
					  BTRFS_ORDERED_COMPRESSED, 0,
					  compress_type);
}

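/*
 * Illustrative mapping of write paths to the wrappers above (assumed call
 * sites, not part of this file): a buffered COW write of one 4K sector
 * would queue
 *
 *	btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
 *				 4096, 4096, BTRFS_ORDERED_REGULAR);
 *
 * Direct IO goes through btrfs_add_ordered_extent_dio(), which additionally
 * sets BTRFS_ORDERED_DIRECT, and compressed writes go through
 * btrfs_add_ordered_extent_compress(), where disk_num_bytes is the
 * compressed on-disk size and may be smaller than num_bytes.
 */
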
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

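/*
 * Note: the sums queued above stay on entry->list until the ordered extent
 * is torn down; btrfs_put_ordered_extent() further below kvfree()s any
 * entries still on the list when the last reference is dropped.
 */
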
/*
 * Mark all ordered extents' IO inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered IO is finished.
 *		 Can be NULL for direct IO and compressed writes.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 * @finish_func: The function to be executed when all the IO of an ordered
 *		 extent is finished.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, btrfs_func_t finish_func,
				    bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending IO unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to the next
			 * range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_func, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}

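/*
 * Worked example for the loop above (illustrative): with ordered extents at
 * [0, 8K) and [12K, 16K) and a call covering [4K, 16K), the loop decrements
 * bytes_left of the first OE by 4K, skips the uncovered hole at [8K, 12K)
 * by jumping cur to the next OE's file_offset, then decrements the second
 * OE by 4K, queuing finish_func for whichever extent reaches
 * bytes_left == 0.
 */
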
/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent.  If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 * @uptodate:	 If the IO finished without problem
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

/*
 * Used to drop a reference on an ordered extent.  This will free the extent
 * if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

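/*
 * Reference counting summary (for orientation): __btrfs_add_ordered_extent()
 * sets refs to 1 for the tree, every lookup helper in this file takes an
 * extra reference on the entry it returns, and btrfs_remove_ordered_extent()
 * below drops none of them.  Each lookup must therefore be paired with a
 * btrfs_put_ordered_extent() by the caller.
 */
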
/*
 * Remove an ordered extent from the tree.  No references are dropped, but
 * any waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

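/*
 * The work item above is the unit queued by btrfs_wait_ordered_extents()
 * below: each item starts and waits for one ordered extent, then signals
 * ordered->completion so the waiter can drop the reference it took before
 * queuing.
 */
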
/*
 * Wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

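/*
 * In both waiters above, @nr == U64_MAX means "wait for everything": the
 * counter is only decremented for finite requests.  For example,
 *
 *	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 *
 * waits for all ordered extents of all roots across the whole range.
 */
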
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is set, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We start IO
	 * on any dirty ones so the wait doesn't stall waiting for the
	 * flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete, to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

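/*
 * Typical lookup/put pairing (illustrative sketch, not a caller in this
 * file):
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * Every successful lookup returns the entry with an extra reference that
 * the caller must drop with btrfs_put_ordered_extent().
 */
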
/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list.  The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Look up and return any ordered extent before @file_offset.  NULL is
 * returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

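/*
 * Contrast with the range variant below (illustrative): with a single
 * ordered extent at [0, 4K), btrfs_lookup_first_ordered_extent(inode, 8K)
 * still returns that extent as the closest one before the offset, while
 * btrfs_lookup_first_ordered_range(inode, 8K, 4K) returns NULL because
 * nothing overlaps [8K, 12K).
 */
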
/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range.  The difference against btrfs_lookup_ordered_extent() is that this
 * function ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset.
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:	  Inode whose ordered tree is to be searched
 * @start:	  Beginning of range to flush
 * @end:	  Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *		  locked range.  It's the caller's responsibility to free the
 *		  cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function.
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

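/*
 * Typical use (illustrative sketch, assumed caller): code that needs a
 * stable, ordered-extent-free range would do something like
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
 *	// ... operate on the locked range ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached_state);
 *
 * The lock/lookup/unlock retry loop above is needed because a new ordered
 * extent can appear between flushing one and re-taking the extent lock.
 */
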
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	u64 num_bytes = len;
	u64 disk_num_bytes = len;
	int type;
	unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
	int compress_type = ordered->compress_type;
	unsigned long weight;
	int ret;

	weight = hweight_long(flags_masked);
	WARN_ON_ONCE(weight > 1);
	if (!weight)
		type = 0;
	else
		type = __ffs(flags_masked);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
		WARN_ON_ONCE(1);
		ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
				file_offset, disk_bytenr, num_bytes,
				disk_num_bytes, compress_type);
	} else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
		ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	} else {
		ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	}

	return ret;
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}