Lines matching +full:1 +full:eb (fs/btrfs/extent_io.c)
44 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb) in btrfs_leak_debug_add_eb() argument
46 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
50 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
54 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb) in btrfs_leak_debug_del_eb() argument
56 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
60 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
66 struct extent_buffer *eb; in btrfs_extent_buffer_leak_debug_check() local
79 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
83 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
84 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
85 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
86 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
91 #define btrfs_leak_debug_add_eb(eb) do {} while (0) argument
92 #define btrfs_leak_debug_del_eb(eb) do {} while (0) argument
190 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); in process_one_page()
191 len = end + 1 - start; in process_one_page()
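Both endpoints of a btrfs extent range are inclusive, which is why lengths throughout this file are computed as end + 1 - start. A minimal stand-alone sketch of the convention (plain userspace C, not kernel code; the bound check mirrors the ASSERT above):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* @start and @end are both inside the range */
    static uint32_t range_len(uint64_t start, uint64_t end)
    {
            uint64_t len = end + 1 - start;

            assert(len != 0 && len < UINT32_MAX);
            return (uint32_t)len;
    }

    int main(void)
    {
            /* one 4K page at offset 8192 covers [8192, 12287] */
            printf("%u\n", range_len(8192, 12287)); /* prints 4096 */
            return 0;
    }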
276 u32 len = end + 1 - start; in lock_delalloc_pages()
291 processed_end = page_offset(page) + PAGE_SIZE - 1; in lock_delalloc_pages()
353 /* @delalloc_end can be -1, never go beyond @orig_end */ in find_lock_delalloc_range()
370 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
371 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
385 loops = 1; in find_lock_delalloc_range()
398 EXTENT_DELALLOC, 1, cached_state); in find_lock_delalloc_range()
541  * Thus we need to do the processed->end + 1 >= start check in endio_readpage_release_extent()
544 processed->end + 1 >= start && end >= processed->end) { in endio_readpage_release_extent()
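With inclusive endpoints, a previously accumulated range [a, b] and a new range [c, d] can be merged whenever b + 1 >= c, i.e. they touch or overlap; that is the check quoted above. A stand-alone illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* true if a range starting at next_start touches or overlaps a
     * range with inclusive end prev_end, e.g. [0, 4095] and [4096, ...]
     * merge because 4095 + 1 >= 4096 */
    static bool ranges_mergeable(uint64_t prev_end, uint64_t next_start)
    {
            return prev_end + 1 >= next_start;
    }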
631 end = start + bvec->bv_len - 1; in end_bio_extent_readpage()
652 offset_in_page(end) + 1); in end_bio_extent_readpage()
720  * 1) The pages belong to the same inode in btrfs_bio_is_contig()
855 static int attach_extent_buffer_page(struct extent_buffer *eb, in attach_extent_buffer_page() argument
859 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_page()
873 attach_page_private(page, eb); in attach_extent_buffer_page()
875 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
968 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
1006 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1011 end - cur + 1, em_cached); in btrfs_do_readpage()
1014 end_page_read(page, false, cur, end + 1 - cur); in btrfs_do_readpage()
1024 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
1069 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
1083 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1091 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1119 u64 end = start + PAGE_SIZE - 1; in btrfs_read_folio()
1155 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
1166 const u64 page_end = page_start + PAGE_SIZE - 1; in writepage_delalloc()
1176 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1185 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1193 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE); in writepage_delalloc()
1199 if (ret == 1) { in writepage_delalloc()
1201 return 1; in writepage_delalloc()
1272 * We return 1 if the IO is started and the page is unlocked,
1284 u64 end = cur + PAGE_SIZE - 1; in __extent_writepage_io()
1296 return 1; in __extent_writepage_io()
1301 u32 len = end - cur + 1; in __extent_writepage_io()
1354 iosize = min(min(em_end, end + 1), dirty_range_end) - cur; in __extent_writepage_io()
1358 btrfs_set_range_writeback(inode, cur, cur + iosize - 1); in __extent_writepage_io()
1432 if (ret == 1) in __extent_writepage()
1438 if (ret == 1) in __extent_writepage()
1459 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) in wait_on_extent_buffer_writeback() argument
1461 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
1472 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
1475 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
1478 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1479 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
1480 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
1483 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
1484 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1488  * We need to do this to prevent races with anyone who checks if the eb is in lock_extent_buffer_for_io()
1492 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
1493 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
1494 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
1495 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1496 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
1498 -eb->len, in lock_extent_buffer_for_io()
1502 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1504 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
1508 static void set_btree_ioerr(struct extent_buffer *eb) in set_btree_ioerr() argument
1510 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
1512 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in set_btree_ioerr()
1518 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
1526 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); in set_btree_ioerr()
1552 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
1553 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
1566 switch (eb->log_index) { in set_btree_ioerr()
1567 case -1: in set_btree_ioerr()
1573 case 1: in set_btree_ioerr()
1588 struct extent_buffer *eb; in find_extent_buffer_nolock() local
1591 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_nolock()
1593 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer_nolock()
1595 return eb; in find_extent_buffer_nolock()
1603 struct extent_buffer *eb = bbio->private; in extent_buffer_write_end_io() local
1604 struct btrfs_fs_info *fs_info = eb->fs_info; in extent_buffer_write_end_io()
1611 set_btree_ioerr(eb); in extent_buffer_write_end_io()
1614 u64 start = eb->start + bio_offset; in extent_buffer_write_end_io()
1622 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in extent_buffer_write_end_io()
1624 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in extent_buffer_write_end_io()
1629 static void prepare_eb_write(struct extent_buffer *eb) in prepare_eb_write() argument
1635 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
1638 nritems = btrfs_header_nritems(eb); in prepare_eb_write()
1639 if (btrfs_header_level(eb) > 0) { in prepare_eb_write()
1640 end = btrfs_node_key_ptr_offset(eb, nritems); in prepare_eb_write()
1641 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
1645 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 in prepare_eb_write()
1647 start = btrfs_item_nr_offset(eb, nritems); in prepare_eb_write()
1648 end = btrfs_item_nr_offset(eb, 0); in prepare_eb_write()
1650 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); in prepare_eb_write()
1652 end += btrfs_item_offset(eb, nritems - 1); in prepare_eb_write()
1653 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
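prepare_eb_write() zeroes the unused middle of the block so stale memory contents never reach disk: in a leaf, item headers grow forward from the node header while item data grows backward from the end, so the hole runs from the end of the item header array to the start of the lowest item's data. A toy model of that computation, with made-up sizes standing in for the real struct btrfs_header and struct btrfs_item:

    #include <stdint.h>
    #include <stdio.h>

    #define HEADER_SIZE 101u  /* hypothetical header size */
    #define ITEM_SIZE   25u   /* hypothetical item header size */

    int main(void)
    {
            uint32_t nritems = 12;
            /* offset of the last (lowest) item's data, relative to the
             * end of the header; value made up for illustration */
            uint32_t last_data_offset = 15000;

            uint32_t gap_start = HEADER_SIZE + nritems * ITEM_SIZE;
            uint32_t gap_end = HEADER_SIZE + last_data_offset;

            printf("memzero [%u, %u): %u bytes\n",
                   gap_start, gap_end, gap_end - gap_start);
            return 0;
    }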
1657 static noinline_for_stack void write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
1660 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_eb()
1663 prepare_eb_write(eb); in write_one_eb()
1667 eb->fs_info, extent_buffer_write_end_io, eb); in write_one_eb()
1668 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in write_one_eb()
1671 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in write_one_eb()
1672 bbio->file_offset = eb->start; in write_one_eb()
1674 struct page *p = eb->pages[0]; in write_one_eb()
1677 btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len); in write_one_eb()
1678 if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start, in write_one_eb()
1679 eb->len)) { in write_one_eb()
1683 __bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p)); in write_one_eb()
1684 wbc_account_cgroup_owner(wbc, p, eb->len); in write_one_eb()
1687 for (int i = 0; i < num_extent_pages(eb); i++) { in write_one_eb()
1688 struct page *p = eb->pages[i]; in write_one_eb()
1727 struct extent_buffer *eb; in submit_eb_subpage() local
1753 * Here we just want to grab the eb without touching extra in submit_eb_subpage()
1756 eb = find_extent_buffer_nolock(fs_info, start); in submit_eb_subpage()
1761  * The eb has already reached 0 refs, thus find_extent_buffer() in submit_eb_subpage()
1762  * doesn't return it. We don't need to write back such an eb in submit_eb_subpage()
1765 if (!eb) in submit_eb_subpage()
1768 if (lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_subpage()
1769 write_one_eb(eb, wbc); in submit_eb_subpage()
1772 free_extent_buffer(eb); in submit_eb_subpage()
1782 * belongs to this eb, we don't need to submit
1801 struct extent_buffer *eb; in submit_eb_page() local
1816 eb = (struct extent_buffer *)page->private; in submit_eb_page()
1822 if (WARN_ON(!eb)) { in submit_eb_page()
1827 if (eb == ctx->eb) { in submit_eb_page()
1831 ret = atomic_inc_not_zero(&eb->refs); in submit_eb_page()
1836 ctx->eb = eb; in submit_eb_page()
1838 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx); in submit_eb_page()
1842 free_extent_buffer(eb); in submit_eb_page()
1846 if (!lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_page()
1847 free_extent_buffer(eb); in submit_eb_page()
1852 /* Mark the last eb in the block group. */ in submit_eb_page()
1853 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb); in submit_eb_page()
1854 ctx->zoned_bg->meta_write_pointer += eb->len; in submit_eb_page()
1856 write_one_eb(eb, wbc); in submit_eb_page()
1857 free_extent_buffer(eb); in submit_eb_page()
1858 return 1; in submit_eb_page()
1879 end = -1; in btree_write_cache_pages()
1888 scanned = 1; in btree_write_cache_pages()
1910 done = 1; in btree_write_cache_pages()
1929 scanned = 1; in btree_write_cache_pages()
1957  * extent io tree. Thus we don't want to submit such a wild eb in btree_write_cache_pages()
2021 end = -1; in extent_write_cache_pages()
2031 range_whole = 1; in extent_write_cache_pages()
2032 scanned = 1; in extent_write_cache_pages()
2045 wbc->tagged_writepages = 1; in extent_write_cache_pages()
2101 done = 1; in extent_write_cache_pages()
2121 scanned = 1; in extent_write_cache_pages()
2165 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); in extent_write_locked_range()
2168 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); in extent_write_locked_range()
2169 u32 cur_len = cur_end + 1 - cur; in extent_write_locked_range()
2180 if (ret == 1) in extent_write_locked_range()
2198 cur = cur_end + 1; in extent_write_locked_range()
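The loop above walks an inclusive byte range one page at a time: each chunk ends at its page boundary or at @end, whichever comes first, and the cursor then advances to cur_end + 1. A minimal sketch of the same walk, with PAGE_SIZE fixed at 4K for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ull

    static void walk_range(uint64_t start, uint64_t end)
    {
            uint64_t cur = start;

            while (cur <= end) {
                    /* round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, clamped to end */
                    uint64_t page_end = cur / PAGE_SIZE * PAGE_SIZE + PAGE_SIZE - 1;
                    uint64_t cur_end = page_end < end ? page_end : end;

                    printf("[%llu, %llu] len %llu\n",
                           (unsigned long long)cur,
                           (unsigned long long)cur_end,
                           (unsigned long long)(cur_end + 1 - cur));
                    cur = cur_end + 1;
            }
    }

    int main(void)
    {
            walk_range(4096, 13000); /* chunks of 4096, 4096 and 713 bytes */
            return 0;
    }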
2230 u64 prev_em_start = (u64)-1; in extent_readahead()
2235 u64 contig_end = contig_start + readahead_batch_length(rac) - 1; in extent_readahead()
2256 u64 end = start + folio_size(folio) - 1; in extent_invalidate_folio()
2287 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
2288 int ret = 1; in try_release_extent_state()
2311 ret = 1; in try_release_extent_state()
2325 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
2337 len = end - start + 1; in try_release_extent_mapping()
2351 extent_map_end(em) - 1, in try_release_extent_mapping()
2413 * errno value and different from 1 because that's also a return value from
2417 #define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
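The sentinel must collide neither with any -errno (kernel errnos occupy [-MAX_ERRNO, -1], and MAX_ERRNO is 4095 per include/linux/err.h) nor with 1, which is already a meaningful return here, so -(MAX_ERRNO + 1) is the first safely unused negative value. A small illustration of the trick:

    #include <stdio.h>

    #define MAX_ERRNO   4095 /* as in include/linux/err.h */
    #define FLUSH_CACHE (-(MAX_ERRNO + 1))

    static int fake_emit(int i)
    {
            if (i == 0)
                    return FLUSH_CACHE; /* internal "flush and retry" code */
            if (i == 1)
                    return 1;           /* reached max entries */
            return -12;                 /* a real -errno (-ENOMEM) */
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    int ret = fake_emit(i);

                    if (ret == FLUSH_CACHE)
                            printf("control code, not an error\n");
                    else if (ret < 0)
                            printf("errno %d\n", ret);
                    else
                            printf("done, ret %d\n", ret);
            }
            return 0;
    }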
2478 * Ignore 1 (reached max entries) because we keep track of that in flush_fiemap_cache()
2522 * file extent item for file range [512K, 1M[, and after in emit_fiemap_extent()
2585 * 1) The file extent item's range ends at or behind the in emit_fiemap_extent()
2615  * 1) Their logical addresses are contiguous in emit_fiemap_extent()
2642 entry = &cache->entries[cache->entries_size - 1]; in emit_fiemap_extent()
2659 return 1; in emit_fiemap_extent()
2719 return 1; in fiemap_next_leaf_item()
2737 * Returns: 0 on success, < 0 on error, 1 if not found.
2758 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); in fiemap_search_slot()
2770 return 1; in fiemap_search_slot()
2849 prealloc_start = last_delalloc_end + 1; in fiemap_process_hole()
2876 delalloc_end + 1 - delalloc_start, in fiemap_process_hole()
2883 cur_offset = delalloc_end + 1; in fiemap_process_hole()
2898 prealloc_len = end + 1 - start; in fiemap_process_hole()
2900 prealloc_start = last_delalloc_end + 1; in fiemap_process_hole()
2901 prealloc_len = end + 1 - prealloc_start; in fiemap_process_hole()
2940 ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0); in fiemap_find_last_extent_offset()
2941 /* There can't be a file extent item at offset (u64)-1 */ in fiemap_find_last_extent_offset()
3083 const u64 hole_end = min(key.offset, range_end) - 1; in extent_fiemap()
3131 extent_end - 1); in extent_fiemap()
3137 key.offset, extent_end - 1); in extent_fiemap()
3185 0, 0, 0, prev_extent_end, range_end - 1); in extent_fiemap()
3201 i_size - 1, in extent_fiemap()
3250 static void __free_extent_buffer(struct extent_buffer *eb) in __free_extent_buffer() argument
3252 kmem_cache_free(extent_buffer_cache, eb); in __free_extent_buffer()
3255 static int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
3257 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
3258 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
3272  * Even if there are no eb refs here, we may still have in page_range_has_eb()
3281 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page) in detach_extent_buffer_page() argument
3283 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_page()
3284 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_page()
3287  * For a mapped eb, we're going to change the page private, which should in detach_extent_buffer_page()
3302 * removed the eb from the radix tree, so we could race in detach_extent_buffer_page()
3303 * and have this page now attached to the new eb. So in detach_extent_buffer_page()
3305 * this eb. in detach_extent_buffer_page()
3308 page->private == (unsigned long)eb) { in detach_extent_buffer_page()
3309 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_page()
3314 * to a new eb. in detach_extent_buffer_page()
3324  * For subpage, we can have a dummy eb with page private. In this case, in detach_extent_buffer_page()
3326 * one dummy eb, no sharing. in detach_extent_buffer_page()
3346 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) in btrfs_release_extent_buffer_pages() argument
3351 ASSERT(!extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_pages()
3353 num_pages = num_extent_pages(eb); in btrfs_release_extent_buffer_pages()
3355 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages()
3360 detach_extent_buffer_page(eb, page); in btrfs_release_extent_buffer_pages()
3370 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
3372 btrfs_release_extent_buffer_pages(eb); in btrfs_release_extent_buffer()
3373 btrfs_leak_debug_del_eb(eb); in btrfs_release_extent_buffer()
3374 __free_extent_buffer(eb); in btrfs_release_extent_buffer()
3381 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
3383 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
3384 eb->start = start; in __alloc_extent_buffer()
3385 eb->len = len; in __alloc_extent_buffer()
3386 eb->fs_info = fs_info; in __alloc_extent_buffer()
3387 init_rwsem(&eb->lock); in __alloc_extent_buffer()
3389 btrfs_leak_debug_add_eb(eb); in __alloc_extent_buffer()
3391 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
3392 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
3396 return eb; in __alloc_extent_buffer()
3443 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
3448 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
3449 if (!eb) in __alloc_dummy_extent_buffer()
3452 num_pages = num_extent_pages(eb); in __alloc_dummy_extent_buffer()
3453 ret = btrfs_alloc_page_array(num_pages, eb->pages); in __alloc_dummy_extent_buffer()
3458 struct page *p = eb->pages[i]; in __alloc_dummy_extent_buffer()
3460 ret = attach_extent_buffer_page(eb, p, NULL); in __alloc_dummy_extent_buffer()
3465 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
3466 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
3467 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
3469 return eb; in __alloc_dummy_extent_buffer()
3472 if (eb->pages[i]) { in __alloc_dummy_extent_buffer()
3473 detach_extent_buffer_page(eb, eb->pages[i]); in __alloc_dummy_extent_buffer()
3474 __free_page(eb->pages[i]); in __alloc_dummy_extent_buffer()
3477 __free_extent_buffer(eb); in __alloc_dummy_extent_buffer()
3487 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
3513 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
3514 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3517 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
3518 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3519 atomic_inc(&eb->refs); in check_buffer_tree_ref()
3520 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
3523 static void mark_extent_buffer_accessed(struct extent_buffer *eb, in mark_extent_buffer_accessed() argument
3528 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
3530 num_pages = num_extent_pages(eb); in mark_extent_buffer_accessed()
3532 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
3542 struct extent_buffer *eb; in find_extent_buffer() local
3544 eb = find_extent_buffer_nolock(fs_info, start); in find_extent_buffer()
3545 if (!eb) in find_extent_buffer()
3548 * Lock our eb's refs_lock to avoid races with free_extent_buffer(). in find_extent_buffer()
3549 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and in find_extent_buffer()
3551 * set, eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
3555 * could race and increment the eb's reference count, clear its stale in find_extent_buffer()
3560 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
3561 spin_lock(&eb->refs_lock); in find_extent_buffer()
3562 spin_unlock(&eb->refs_lock); in find_extent_buffer()
3564 mark_extent_buffer_accessed(eb, NULL); in find_extent_buffer()
3565 return eb; in find_extent_buffer()
3572 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
3575 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
3576 if (eb) in alloc_test_extent_buffer()
3577 return eb; in alloc_test_extent_buffer()
3578 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
3579 if (!eb) in alloc_test_extent_buffer()
3581 eb->fs_info = fs_info; in alloc_test_extent_buffer()
3590 start >> fs_info->sectorsize_bits, eb); in alloc_test_extent_buffer()
3600 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
3601 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
3603 return eb; in alloc_test_extent_buffer()
3605 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
3628 * We could have already allocated an eb for this page and attached one in grab_extent_buffer()
3629  * so let's see if we can get a ref on the existing eb, and if we can we in grab_extent_buffer()
3673 struct extent_buffer *eb; in alloc_extent_buffer() local
3679 int uptodate = 1; in alloc_extent_buffer()
3696 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
3697 if (eb) in alloc_extent_buffer()
3698 return eb; in alloc_extent_buffer()
3700 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
3701 if (!eb) in alloc_extent_buffer()
3711 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); in alloc_extent_buffer()
3713 num_pages = num_extent_pages(eb); in alloc_extent_buffer()
3749 ret = attach_extent_buffer_page(eb, p, prealloc); in alloc_extent_buffer()
3752  * To inform that we have an extra eb under allocation, so that in alloc_extent_buffer()
3754  * when the eb hasn't yet been inserted into the radix tree. in alloc_extent_buffer()
3756  * The ref will be decreased when the eb releases the page, in in alloc_extent_buffer()
3763 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len)); in alloc_extent_buffer()
3764 eb->pages[i] = p; in alloc_extent_buffer()
3765 if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len)) in alloc_extent_buffer()
3777 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
3787 start >> fs_info->sectorsize_bits, eb); in alloc_extent_buffer()
3798 check_buffer_tree_ref(eb); in alloc_extent_buffer()
3799 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
3807 unlock_page(eb->pages[i]); in alloc_extent_buffer()
3808 return eb; in alloc_extent_buffer()
3811 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
3813 if (eb->pages[i]) in alloc_extent_buffer()
3814 unlock_page(eb->pages[i]); in alloc_extent_buffer()
3817 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
3823 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
3826 __free_extent_buffer(eb); in btrfs_release_extent_buffer_rcu()
3829 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
3830 __releases(&eb->refs_lock) in release_extent_buffer()
3832 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
3834 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
3835 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
3836 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
3837 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
3839 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3843 eb->start >> fs_info->sectorsize_bits); in release_extent_buffer()
3846 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3849 btrfs_leak_debug_del_eb(eb); in release_extent_buffer()
3851 btrfs_release_extent_buffer_pages(eb); in release_extent_buffer()
3853 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
3854 __free_extent_buffer(eb); in release_extent_buffer()
3855 return 1; in release_extent_buffer()
3858 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
3859 return 1; in release_extent_buffer()
3861 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3866 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
3869 if (!eb) in free_extent_buffer()
3872 refs = atomic_read(&eb->refs); in free_extent_buffer()
3873 while (1) { in free_extent_buffer()
3874 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
3875 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
3876 refs == 1)) in free_extent_buffer()
3878 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) in free_extent_buffer()
3882 spin_lock(&eb->refs_lock); in free_extent_buffer()
3883 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
3884 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
3885 !extent_buffer_under_io(eb) && in free_extent_buffer()
3886 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
3887 atomic_dec(&eb->refs); in free_extent_buffer()
3893 release_extent_buffer(eb); in free_extent_buffer()
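The refs loop above is a lockless fast path: while the count stays above the threshold at which stale-buffer handling might be needed (3 for mapped buffers, 1 for UNMAPPED ones), a compare-and-swap drops the reference without taking refs_lock; only at or below the threshold does it fall through to the locked slow path. A userspace sketch of the pattern with C11 atomics (the threshold is a parameter here, details simplified):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* returns true if a reference was dropped locklessly; false means
     * the caller must take the lock and run the slow path */
    static bool fast_put(atomic_int *refs, int slow_threshold)
    {
            int cur = atomic_load(refs);

            while (cur > slow_threshold) {
                    /* on failure, cur is reloaded with the current value */
                    if (atomic_compare_exchange_weak(refs, &cur, cur - 1))
                            return true;
            }
            return false;
    }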
3896 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
3898 if (!eb) in free_extent_buffer_stale()
3901 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
3902 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
3904 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
3905 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
3906 atomic_dec(&eb->refs); in free_extent_buffer_stale()
3907 release_extent_buffer(eb); in free_extent_buffer_stale()
3922 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb) in clear_subpage_extent_buffer_dirty() argument
3924 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_subpage_extent_buffer_dirty()
3925 struct page *page = eb->pages[0]; in clear_subpage_extent_buffer_dirty()
3930 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start, in clear_subpage_extent_buffer_dirty()
3931 eb->len); in clear_subpage_extent_buffer_dirty()
3935 WARN_ON(atomic_read(&eb->refs) == 0); in clear_subpage_extent_buffer_dirty()
3939 struct extent_buffer *eb) in btrfs_clear_buffer_dirty() argument
3941 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_clear_buffer_dirty()
3946 btrfs_assert_tree_write_locked(eb); in btrfs_clear_buffer_dirty()
3948 if (trans && btrfs_header_generation(eb) != trans->transid) in btrfs_clear_buffer_dirty()
3951 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) in btrfs_clear_buffer_dirty()
3954 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, in btrfs_clear_buffer_dirty()
3957 if (eb->fs_info->nodesize < PAGE_SIZE) in btrfs_clear_buffer_dirty()
3958 return clear_subpage_extent_buffer_dirty(eb); in btrfs_clear_buffer_dirty()
3960 num_pages = num_extent_pages(eb); in btrfs_clear_buffer_dirty()
3963 page = eb->pages[i]; in btrfs_clear_buffer_dirty()
3970 WARN_ON(atomic_read(&eb->refs) == 0); in btrfs_clear_buffer_dirty()
3973 void set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
3979 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
3981 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
3983 num_pages = num_extent_pages(eb); in set_extent_buffer_dirty()
3984 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
3985 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
3988 bool subpage = eb->fs_info->nodesize < PAGE_SIZE; in set_extent_buffer_dirty()
4002 lock_page(eb->pages[0]); in set_extent_buffer_dirty()
4004 btrfs_page_set_dirty(eb->fs_info, eb->pages[i], in set_extent_buffer_dirty()
4005 eb->start, eb->len); in set_extent_buffer_dirty()
4007 unlock_page(eb->pages[0]); in set_extent_buffer_dirty()
4008 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, in set_extent_buffer_dirty()
4009 eb->len, in set_extent_buffer_dirty()
4010 eb->fs_info->dirty_metadata_batch); in set_extent_buffer_dirty()
4014 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
4018 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
4020 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_extent_buffer_uptodate()
4025 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
4026 num_pages = num_extent_pages(eb); in clear_extent_buffer_uptodate()
4028 page = eb->pages[i]; in clear_extent_buffer_uptodate()
4039 btrfs_subpage_clear_uptodate(fs_info, page, eb->start, in clear_extent_buffer_uptodate()
4040 eb->len); in clear_extent_buffer_uptodate()
4044 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
4046 struct btrfs_fs_info *fs_info = eb->fs_info; in set_extent_buffer_uptodate()
4051 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
4052 num_pages = num_extent_pages(eb); in set_extent_buffer_uptodate()
4054 page = eb->pages[i]; in set_extent_buffer_uptodate()
4063 btrfs_subpage_set_uptodate(fs_info, page, eb->start, in set_extent_buffer_uptodate()
4064 eb->len); in set_extent_buffer_uptodate()
4070 struct extent_buffer *eb = bbio->private; in extent_buffer_read_end_io() local
4071 struct btrfs_fs_info *fs_info = eb->fs_info; in extent_buffer_read_end_io()
4077 eb->read_mirror = bbio->mirror_num; in extent_buffer_read_end_io()
4080 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) in extent_buffer_read_end_io()
4084 set_extent_buffer_uptodate(eb); in extent_buffer_read_end_io()
4086 clear_extent_buffer_uptodate(eb); in extent_buffer_read_end_io()
4087 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in extent_buffer_read_end_io()
4091 u64 start = eb->start + bio_offset; in extent_buffer_read_end_io()
4103 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in extent_buffer_read_end_io()
4105 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in extent_buffer_read_end_io()
4106 free_extent_buffer(eb); in extent_buffer_read_end_io()
4111 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, in read_extent_buffer_pages() argument
4114 int num_pages = num_extent_pages(eb), i; in read_extent_buffer_pages()
4117 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
4125 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages()
4129 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) in read_extent_buffer_pages()
4135 * started and finished reading the same eb. In this case, UPTODATE in read_extent_buffer_pages()
4138 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) { in read_extent_buffer_pages()
4139 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in read_extent_buffer_pages()
4141 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in read_extent_buffer_pages()
4145 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
4146 eb->read_mirror = 0; in read_extent_buffer_pages()
4147 check_buffer_tree_ref(eb); in read_extent_buffer_pages()
4148 atomic_inc(&eb->refs); in read_extent_buffer_pages()
4151 REQ_OP_READ | REQ_META, eb->fs_info, in read_extent_buffer_pages()
4152 extent_buffer_read_end_io, eb); in read_extent_buffer_pages()
4153 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in read_extent_buffer_pages()
4154 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in read_extent_buffer_pages()
4155 bbio->file_offset = eb->start; in read_extent_buffer_pages()
4157 if (eb->fs_info->nodesize < PAGE_SIZE) { in read_extent_buffer_pages()
4158 __bio_add_page(&bbio->bio, eb->pages[0], eb->len, in read_extent_buffer_pages()
4159 eb->start - page_offset(eb->pages[0])); in read_extent_buffer_pages()
4162 __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0); in read_extent_buffer_pages()
4168 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); in read_extent_buffer_pages()
4169 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
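read_extent_buffer_pages() uses EXTENT_BUFFER_READING as a single-submitter gate: test_and_set_bit() lets exactly one task issue the read while everyone else waits on the bit, and rechecking UPTODATE after winning the bit catches a read that completed in the window in between. A stand-alone sketch of the gate with C11 atomics (flag values are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define UPTODATE 0x1u
    #define READING  0x2u

    /* returns true if the caller won the right to issue the read */
    static bool begin_read(atomic_uint *flags)
    {
            if (atomic_load(flags) & UPTODATE)
                    return false;           /* nothing to read */
            if (atomic_fetch_or(flags, READING) & READING)
                    return false;           /* another task is reading */
            if (atomic_load(flags) & UPTODATE) {
                    /* raced with a read that finished before we set the
                     * bit: back out, as the code above does */
                    atomic_fetch_and(flags, ~READING);
                    return false;
            }
            return true;
    }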
4176 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
4179 btrfs_warn(eb->fs_info, in report_eb_range()
4180 "access to eb bytenr %llu len %lu out of range start %lu len %lu", in report_eb_range()
4181 eb->start, eb->len, start, len); in report_eb_range()
4189 * the eb.
4190 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
4194 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
4199 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
4200 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
4201 return report_eb_range(eb, start, len); in check_eb_range()
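check_eb_range() rejects not only ranges that end past eb->len but also ones whose start + len wraps around: check_add_overflow() reports the wrap instead of letting it silently produce a small offset. A plain-C sketch built on the GCC/Clang builtin underlying the kernel macro:

    #include <stdbool.h>
    #include <stdio.h>

    static bool range_ok(unsigned long start, unsigned long len,
                         unsigned long buf_len)
    {
            unsigned long end;

            /* fails on wraparound or on a range past the buffer */
            if (__builtin_add_overflow(start, len, &end) || end > buf_len)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d\n", range_ok(4096, ~0UL, 16384)); /* 0: would wrap */
            printf("%d\n", range_ok(4096, 4096, 16384)); /* 1: fits */
            return 0;
    }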
4206 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
4216 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
4225 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer()
4228 page = eb->pages[i]; in read_extent_buffer()
4241 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
4253 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
4254 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
4256 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer_to_user_nofault()
4259 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
4277 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
4288 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
4291 offset = get_eb_offset_in_page(eb, start); in memcmp_extent_buffer()
4294 page = eb->pages[i]; in memcmp_extent_buffer()
4315 * For the subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4317 static void assert_eb_page_uptodate(const struct extent_buffer *eb, in assert_eb_page_uptodate() argument
4320 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_page_uptodate()
4330 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in assert_eb_page_uptodate()
4335 eb->start, eb->len))) in assert_eb_page_uptodate()
4336 btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len); in assert_eb_page_uptodate()
4342 static void __write_extent_buffer(const struct extent_buffer *eb, in __write_extent_buffer() argument
4353 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __write_extent_buffer()
4355 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)); in __write_extent_buffer()
4357 if (check_eb_range(eb, start, len)) in __write_extent_buffer()
4360 offset = get_eb_offset_in_page(eb, start); in __write_extent_buffer()
4363 page = eb->pages[i]; in __write_extent_buffer()
4365 assert_eb_page_uptodate(eb, page); in __write_extent_buffer()
4381 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
4384 return __write_extent_buffer(eb, srcv, start, len, false); in write_extent_buffer()
4387 static void memset_extent_buffer(const struct extent_buffer *eb, int c, in memset_extent_buffer() argument
4394 unsigned int offset = get_eb_offset_in_page(eb, cur); in memset_extent_buffer()
4396 struct page *page = eb->pages[index]; in memset_extent_buffer()
4398 assert_eb_page_uptodate(eb, page); in memset_extent_buffer()
4405 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
4408 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
4410 return memset_extent_buffer(eb, 0, start, len); in memzero_extent_buffer()
4471 * @eb: the extent buffer
4481 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
4494 offset = start + offset_in_page(eb->start) + byte_offset; in eb_bitmap_offset()
4503 * @eb: the extent buffer
4507 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
4515 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
4516 page = eb->pages[i]; in extent_buffer_test_bit()
4517 assert_eb_page_uptodate(eb, page); in extent_buffer_test_bit()
4519 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
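The bitmap helpers address bit @nr as byte start + nr / 8, bit nr % 8 (a little-endian bitmap), and the expression above shifts that byte right and masks out the low bit. A flat-buffer sketch of the same lookup; the real code additionally translates the byte offset through the eb's pages:

    #include <stdint.h>

    #define BITS_PER_BYTE 8
    #define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)

    static int test_bit_at(const uint8_t *buf, unsigned long start,
                           unsigned long nr)
    {
            const uint8_t byte = buf[start + BIT_BYTE(nr)];

            return 1U & (byte >> (nr & (BITS_PER_BYTE - 1)));
    }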
4522 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr) in extent_buffer_get_byte() argument
4526 if (check_eb_range(eb, bytenr, 1)) in extent_buffer_get_byte()
4528 return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr); in extent_buffer_get_byte()
4532 * Set an area of a bitmap to 1.
4534 * @eb: the extent buffer
4539 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
4543 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_set()
4552 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_set()
4558 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_set()
4559 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_set()
4562 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_set()
4570 * @eb: the extent buffer
4575 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
4580 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_clear()
4589 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_clear()
4595 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_clear()
4596 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_clear()
4599 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_clear()
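Both bitmap helpers split the range into a partial first byte, a run of whole bytes, and a partial last byte: the partial bytes are masked in place and the middle run is one memset() of 0xff or 0. A flat-buffer sketch of the set case (clear is the same with inverted masks and a memset of 0; the eb variant additionally offsets by the bitmap's start byte):

    #include <stdint.h>
    #include <string.h>

    #define BITS_PER_BYTE 8
    #define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)

    /* assumes len >= 1 */
    static void bitmap_set_range(uint8_t *map, unsigned long pos,
                                 unsigned long len)
    {
            unsigned long first = BIT_BYTE(pos);
            unsigned long last = BIT_BYTE(pos + len - 1);
            /* bits pos%8 .. 7 of the first byte */
            uint8_t first_mask = (uint8_t)(0xff << (pos % BITS_PER_BYTE));
            /* bits 0 .. (pos+len-1)%8 of the last byte */
            uint8_t last_mask =
                    (uint8_t)(0xff >> (BITS_PER_BYTE - 1 -
                                       (pos + len - 1) % BITS_PER_BYTE));

            if (first == last) {
                    map[first] |= first_mask & last_mask;
                    return;
            }
            map[first] |= first_mask;
            memset(&map[first + 1], 0xff, last - first - 1);
            map[last] |= last_mask;
    }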
4639 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
4640 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
4664 cur = min_t(unsigned long, len, src_off_in_page + 1); in memmove_extent_buffer()
4665 cur = min(cur, dst_off_in_page + 1); in memmove_extent_buffer()
4668 cur + 1; in memmove_extent_buffer()
4669 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1, in memmove_extent_buffer()
4672 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur, in memmove_extent_buffer()
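Because the buffer is split across pages, each chunk is copied separately, and memmove() is only required when that particular chunk's source and destination overlap; otherwise the plain memcpy() path of __write_extent_buffer() is taken. A sketch of the per-chunk decision, with areas_overlap() modeled on the kernel helper of the same name:

    #include <stdbool.h>
    #include <string.h>

    static bool areas_overlap(unsigned long src, unsigned long dst,
                              unsigned long len)
    {
            unsigned long distance = src > dst ? src - dst : dst - src;

            return distance < len;
    }

    static void copy_chunk(char *base, unsigned long dst,
                           unsigned long src, unsigned long len)
    {
            if (areas_overlap(src, dst, len))
                    memmove(base + dst, base + src, len);
            else
                    memcpy(base + dst, base + src, len);
    }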
4713 cur = gang[ret - 1]->start + gang[ret - 1]->len; in get_next_extent_buffer()
4727 struct extent_buffer *eb = NULL; in try_release_subpage_extent_buffer() local
4738 eb = get_next_extent_buffer(fs_info, page, cur); in try_release_subpage_extent_buffer()
4739 if (!eb) { in try_release_subpage_extent_buffer()
4740 /* No more ebs in the page range at or after cur */ in try_release_subpage_extent_buffer()
4744 cur = eb->start + eb->len; in try_release_subpage_extent_buffer()
4747 * The same as try_release_extent_buffer(), to ensure the eb in try_release_subpage_extent_buffer()
4750 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4751 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
4752 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4759 * If tree ref isn't set then we know the ref on this eb is a in try_release_subpage_extent_buffer()
4760 * real ref, so just return, this eb will likely be freed soon in try_release_subpage_extent_buffer()
4763 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
4764 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4773 release_extent_buffer(eb); in try_release_subpage_extent_buffer()
4781 ret = 1; in try_release_subpage_extent_buffer()
4791 struct extent_buffer *eb; in try_release_extent_buffer() local
4803 return 1; in try_release_extent_buffer()
4806 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
4807 BUG_ON(!eb); in try_release_extent_buffer()
4811 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
4814 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
4815 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
4816 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4823 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
4826 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
4827 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4831 return release_extent_buffer(eb); in try_release_extent_buffer()
4838 * @owner_root: objectid of the root that owns this eb
4840 * @level: level for the eb
4843 * normal uptodate check of the eb, without checking the generation. If we have
4854 struct extent_buffer *eb; in btrfs_readahead_tree_block() local
4857 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); in btrfs_readahead_tree_block()
4858 if (IS_ERR(eb)) in btrfs_readahead_tree_block()
4861 if (btrfs_buffer_uptodate(eb, gen, 1)) { in btrfs_readahead_tree_block()
4862 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4866 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check); in btrfs_readahead_tree_block()
4868 free_extent_buffer_stale(eb); in btrfs_readahead_tree_block()
4870 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4887 btrfs_header_level(node) - 1); in btrfs_readahead_node_child()