// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the node it collided with in the
 * tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found, return the
 * entry at the first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that covers this offset, otherwise return
 * the first one at a lesser offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
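		/*
		 * tree->last caches the node returned by the previous search;
		 * if the cached extent still covers the offset we can skip
		 * the rbtree walk below.
		 */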
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree.
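	 * It is dropped from the ordered extent completion path once the
	 * extent has been removed from the tree; the allocation reference
	 * belongs to the caller and is dropped via btrfs_put_ordered_extent().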
	 */
	refcount_inc(&entry->refs);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, entry->file_offset, &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    entry->file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted,
 * and the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across them.
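 * The list is protected by the per-inode ordered tree lock.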
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree.lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_page_test_ordered(fs_info, page, file_offset, len))
			return false;
		btrfs_page_clear_ordered(fs_info, page, file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree.lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree.lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
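 * (btrfs_mark_ordered_io_finished() is the variant that walks ranges
 * spanning several ordered extents.)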
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

/*
 * Drop a reference on an ordered extent.  This frees the extent if the last
 * reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree.  No references are dropped, but
 * waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent.
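	 * That path bumped the inode's outstanding extent count when the
	 * ordered extent was created; undo it here.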
	 */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
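 *
 * @nr caps how many ordered extents are waited for (U64_MAX means no cap),
 * and only extents whose disk bytenr range overlaps
 * [range_start, range_start + range_len) are considered.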
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We start IO
	 * on any dirty ones so the wait doesn't stall waiting for the flusher
	 * thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
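 *
 * For example, with an ordered extent covering [4K, 8K), a lookup of the
 * range [6K, 16K) must still find it even though the extent does not start
 * at the range start.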
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Look up and return any extent before 'file_offset'.  NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * Unlike btrfs_lookup_first_ordered_extent(), this one won't return an
 * ordered extent that does not overlap the range.
 * And unlike btrfs_lookup_ordered_extent(), this function ensures the first
 * ordered extent in the range gets returned.
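 * (Since ordered extents in the tree never overlap each other, "first" here
 * means the overlapping extent with the lowest file_offset.)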
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that covers
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:	  Inode whose ordered tree is to be searched
 * @start:	  Beginning of range to flush
 * @end:	  Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *		  locked range. It's the caller's responsibility to free the
 *		  cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
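 * If an ordered extent is found, the range is unlocked, the extent is flushed
 * and the lock is retried, until a pass finds no pending ordered extents.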
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked with no ordered extents pending in it,
 * false otherwise.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree.
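	 * This mirrors insert_ordered_extent(); the new extent is inserted
	 * into the tree by hand further below instead of via that helper.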
	 */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	node = tree_insert(&tree->tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    new->file_offset);
	spin_unlock(&tree->lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}