Lines Matching +full:page +full:- +full:offset

1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/page-flags.h>
19 #include "extent-io-tree.h"
24 #include "check-integrity.h"
26 #include "rcu-string.h"
28 #include "disk-io.h"
31 #include "block-group.h"
35 #include "file-item.h"
37 #include "dev-replace.h"
46 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
49 spin_lock_irqsave(&fs_info->eb_leak_lock, flags); in btrfs_leak_debug_add_eb()
50 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
51 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); in btrfs_leak_debug_add_eb()
56 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
59 spin_lock_irqsave(&fs_info->eb_leak_lock, flags); in btrfs_leak_debug_del_eb()
60 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
61 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); in btrfs_leak_debug_del_eb()
73 if (!fs_info->allocated_ebs.next) in btrfs_extent_buffer_leak_debug_check()
76 WARN_ON(!list_empty(&fs_info->allocated_ebs)); in btrfs_extent_buffer_leak_debug_check()
77 spin_lock_irqsave(&fs_info->eb_leak_lock, flags); in btrfs_extent_buffer_leak_debug_check()
78 while (!list_empty(&fs_info->allocated_ebs)) { in btrfs_extent_buffer_leak_debug_check()
79 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
83 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
85 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
88 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); in btrfs_extent_buffer_leak_debug_check()
110 struct btrfs_bio *bbio = bio_ctrl->bbio; in submit_one_bio()
116 ASSERT(bbio->bio.bi_iter.bi_size); in submit_one_bio()
118 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ && in submit_one_bio()
119 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) in submit_one_bio()
125 bio_ctrl->bbio = NULL; in submit_one_bio()
133 struct btrfs_bio *bbio = bio_ctrl->bbio; in submit_write_bio()
142 bio_ctrl->bbio = NULL; in submit_write_bio()
154 return -ENOMEM; in extent_buffer_init_cachep()
173 struct page *page; in extent_range_clear_dirty_for_io() local
176 page = find_get_page(inode->i_mapping, index); in extent_range_clear_dirty_for_io()
177 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_clear_dirty_for_io()
178 clear_page_dirty_for_io(page); in extent_range_clear_dirty_for_io()
179 put_page(page); in extent_range_clear_dirty_for_io()
185 struct page *page, struct page *locked_page, in process_one_page() argument
190 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); in process_one_page()
191 len = end + 1 - start; in process_one_page()
194 btrfs_page_clamp_set_ordered(fs_info, page, start, len); in process_one_page()
196 btrfs_page_clamp_clear_dirty(fs_info, page, start, len); in process_one_page()
197 btrfs_page_clamp_set_writeback(fs_info, page, start, len); in process_one_page()
200 btrfs_page_clamp_clear_writeback(fs_info, page, start, len); in process_one_page()
202 if (page != locked_page && (page_ops & PAGE_UNLOCK)) in process_one_page()
203 btrfs_page_end_writer_lock(fs_info, page, start, len); in process_one_page()
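The btrfs_page_clamp_*() calls above all follow the same pattern: take an arbitrary byte range and apply the page state change only to the part of it that overlaps the given page. As a rough illustration (plain userspace C with made-up names, not the kernel helpers), the clamping itself boils down to:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Clamp [start, start + len) to the page beginning at page_start.
 * Returns the overlapping length and stores the offset inside the page. */
static uint32_t clamp_to_page(uint64_t page_start, uint64_t start,
			      uint32_t len, uint32_t *off_in_page)
{
	uint64_t page_end = page_start + PAGE_SIZE;	/* exclusive */
	uint64_t range_end = start + len;		/* exclusive */
	uint64_t lo = start > page_start ? start : page_start;
	uint64_t hi = range_end < page_end ? range_end : page_end;

	if (hi <= lo)
		return 0;				/* no overlap */
	*off_in_page = (uint32_t)(lo - page_start);
	return (uint32_t)(hi - lo);
}

int main(void)
{
	uint32_t off;
	/* A 12 KiB range starting 1 KiB into the page covers only 3 KiB of it. */
	uint32_t len = clamp_to_page(0, 1024, 12 * 1024, &off);

	printf("offset %u len %u\n", off, len);		/* offset 1024 len 3072 */
	return 0;
}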
207 struct page *locked_page, u64 start, u64 end, in __process_pages_contig()
210 struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb); in __process_pages_contig()
226 process_one_page(fs_info, &folio->page, locked_page, in __process_pages_contig()
235 struct page *locked_page, in __unlock_for_delalloc()
242 if (index == locked_page->index && end_index == index) in __unlock_for_delalloc()
245 __process_pages_contig(inode->i_mapping, locked_page, start, end, in __unlock_for_delalloc()
250 struct page *locked_page, in lock_delalloc_pages()
254 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in lock_delalloc_pages()
255 struct address_space *mapping = inode->i_mapping; in lock_delalloc_pages()
262 if (index == locked_page->index && index == end_index) in lock_delalloc_pages()
275 struct page *page = &fbatch.folios[i]->page; in lock_delalloc_pages() local
276 u32 len = end + 1 - start; in lock_delalloc_pages()
278 if (page == locked_page) in lock_delalloc_pages()
281 if (btrfs_page_start_writer_lock(fs_info, page, start, in lock_delalloc_pages()
285 if (!PageDirty(page) || page->mapping != mapping) { in lock_delalloc_pages()
286 btrfs_page_end_writer_lock(fs_info, page, start, in lock_delalloc_pages()
291 processed_end = page_offset(page) + PAGE_SIZE - 1; in lock_delalloc_pages()
302 return -EAGAIN; in lock_delalloc_pages()
318 * original range, and @start/@end will be the non-delalloc range start/end.
322 struct page *locked_page, u64 *start, in find_lock_delalloc_range()
325 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in find_lock_delalloc_range()
326 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in find_lock_delalloc_range()
330 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE; in find_lock_delalloc_range()
341 /* The range should at least cover part of the page */ in find_lock_delalloc_range()
353 /* @delalloc_end can be -1, never go beyond @orig_end */ in find_lock_delalloc_range()
360 * start comes from the offset of locked_page. We have to lock in find_lock_delalloc_range()
370 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
371 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
373 /* step two, lock all the pages after the page that has start */ in find_lock_delalloc_range()
376 ASSERT(!ret || ret == -EAGAIN); in find_lock_delalloc_range()
377 if (ret == -EAGAIN) { in find_lock_delalloc_range()
415 struct page *locked_page, in extent_clear_unlock_delalloc()
418 clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL); in extent_clear_unlock_delalloc()
420 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page, in extent_clear_unlock_delalloc()
424 static bool btrfs_verify_page(struct page *page, u64 start) in btrfs_verify_page() argument
426 if (!fsverity_active(page->mapping->host) || in btrfs_verify_page()
427 PageUptodate(page) || in btrfs_verify_page()
428 start >= i_size_read(page->mapping->host)) in btrfs_verify_page()
430 return fsverity_verify_page(page); in btrfs_verify_page()
433 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) in end_page_read() argument
435 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); in end_page_read()
437 ASSERT(page_offset(page) <= start && in end_page_read()
438 start + len <= page_offset(page) + PAGE_SIZE); in end_page_read()
440 if (uptodate && btrfs_verify_page(page, start)) in end_page_read()
441 btrfs_page_set_uptodate(fs_info, page, start, len); in end_page_read()
443 btrfs_page_clear_uptodate(fs_info, page, start, len); in end_page_read()
445 if (!btrfs_is_subpage(fs_info, page)) in end_page_read()
446 unlock_page(page); in end_page_read()
448 btrfs_subpage_end_reader(fs_info, page, start, len); in end_page_read()
455 * end_page_writeback if the page has no more pending IO
462 struct bio *bio = &bbio->bio; in end_bio_extent_writepage()
463 int error = blk_status_to_errno(bio->bi_status); in end_bio_extent_writepage()
469 struct page *page = bvec->bv_page; in end_bio_extent_writepage() local
470 struct inode *inode = page->mapping->host; in end_bio_extent_writepage()
471 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in end_bio_extent_writepage()
472 const u32 sectorsize = fs_info->sectorsize; in end_bio_extent_writepage()
473 u64 start = page_offset(page) + bvec->bv_offset; in end_bio_extent_writepage()
474 u32 len = bvec->bv_len; in end_bio_extent_writepage()
477 if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) in end_bio_extent_writepage()
479 "partial page write in btrfs with offset %u and length %u", in end_bio_extent_writepage()
480 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
481 else if (!IS_ALIGNED(bvec->bv_len, sectorsize)) in end_bio_extent_writepage()
483 "incomplete page write with offset %u and length %u", in end_bio_extent_writepage()
484 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
486 btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error); in end_bio_extent_writepage()
488 mapping_set_error(page->mapping, error); in end_bio_extent_writepage()
489 btrfs_page_clear_writeback(fs_info, page, start, len); in end_bio_extent_writepage()
529 if (!processed->inode) in endio_readpage_release_extent()
537 * - bio can be merged as long as on-disk bytenr is contiguous in endio_readpage_release_extent()
538 * This means we can have pages belonging to other inodes, thus we need to in endio_readpage_release_extent()
540 * - bvec can contain range beyond current page for multi-page bvec in endio_readpage_release_extent()
541 * Thus we need to do processed->end + 1 >= start check in endio_readpage_release_extent()
543 if (processed->inode == inode && processed->uptodate == uptodate && in endio_readpage_release_extent()
544 processed->end + 1 >= start && end >= processed->end) { in endio_readpage_release_extent()
545 processed->end = end; in endio_readpage_release_extent()
549 tree = &processed->inode->io_tree; in endio_readpage_release_extent()
554 unlock_extent(tree, processed->start, processed->end, &cached); in endio_readpage_release_extent()
558 processed->inode = inode; in endio_readpage_release_extent()
559 processed->start = start; in endio_readpage_release_extent()
560 processed->end = end; in endio_readpage_release_extent()
561 processed->uptodate = uptodate; in endio_readpage_release_extent()
564 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page) in begin_page_read() argument
566 ASSERT(PageLocked(page)); in begin_page_read()
567 if (!btrfs_is_subpage(fs_info, page)) in begin_page_read()
570 ASSERT(PagePrivate(page)); in begin_page_read()
571 btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE); in begin_page_read()
578 * set the page up to date if all extents in the tree are uptodate
580 * unlock the page if there are no other extents locked for it
587 struct bio *bio = &bbio->bio; in end_bio_extent_readpage()
591 * The offset to the beginning of a bio, since one bio can never be in end_bio_extent_readpage()
599 bool uptodate = !bio->bi_status; in end_bio_extent_readpage()
600 struct page *page = bvec->bv_page; in end_bio_extent_readpage() local
601 struct inode *inode = page->mapping->host; in end_bio_extent_readpage()
602 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in end_bio_extent_readpage()
603 const u32 sectorsize = fs_info->sectorsize; in end_bio_extent_readpage()
610 bio->bi_iter.bi_sector, bio->bi_status, in end_bio_extent_readpage()
611 bbio->mirror_num); in end_bio_extent_readpage()
614 * We always issue full-sector reads, but if some block in a in end_bio_extent_readpage()
615 * page fails to read, blk_update_request() will advance in end_bio_extent_readpage()
620 if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) in end_bio_extent_readpage()
622 "partial page read in btrfs with offset %u and length %u", in end_bio_extent_readpage()
623 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
624 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len, in end_bio_extent_readpage()
627 "incomplete page read with offset %u and length %u", in end_bio_extent_readpage()
628 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
630 start = page_offset(page) + bvec->bv_offset; in end_bio_extent_readpage()
631 end = start + bvec->bv_len - 1; in end_bio_extent_readpage()
632 len = bvec->bv_len; in end_bio_extent_readpage()
647 if (page->index == end_index && i_size <= end) { in end_bio_extent_readpage()
651 zero_user_segment(page, zero_start, in end_bio_extent_readpage()
656 /* Update page status and unlock. */ in end_bio_extent_readpage()
657 end_page_read(page, uptodate, start, len); in end_bio_extent_readpage()
674 * @page_array: the array to fill with pages; any existing non-null entries in
678 * -ENOMEM otherwise, the partially allocated pages would be freed and
681 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array) in btrfs_alloc_page_array()
695 return -ENOMEM; in btrfs_alloc_page_array()
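The btrfs_alloc_page_array() hits above document the bulk-allocation contract: fill the array of pages, and on partial failure release whatever was just allocated and report -ENOMEM. A minimal userspace sketch of that pattern (malloc() standing in for page allocation, invented names, and without the real helper's handling of already-populated slots):

#include <stdlib.h>
#include <stdio.h>

/* Fill bufs[0..n-1]; on failure free what this call allocated,
 * reset those slots to NULL and return -1 (standing in for -ENOMEM). */
static int alloc_buf_array(unsigned int n, size_t size, void **bufs)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i])
			goto cleanup;
	}
	return 0;
cleanup:
	while (i > 0) {
		free(bufs[--i]);
		bufs[i] = NULL;
	}
	return -1;
}

int main(void)
{
	void *bufs[4] = { NULL };

	printf("%d\n", alloc_buf_array(4, 4096, bufs));	/* 0 on success */
	for (int i = 0; i < 4; i++)
		free(bufs[i]);
	return 0;
}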
702 struct page *page, u64 disk_bytenr, in btrfs_bio_is_contig() argument
705 struct bio *bio = &bio_ctrl->bbio->bio; in btrfs_bio_is_contig()
709 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) { in btrfs_bio_is_contig()
714 return bio->bi_iter.bi_sector == sector; in btrfs_bio_is_contig()
725 * 3) The range has adjacent file offset in btrfs_bio_is_contig()
726 * This is required for the usage of btrfs_bio->file_offset. in btrfs_bio_is_contig()
729 page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len == in btrfs_bio_is_contig()
730 page_offset(page) + pg_offset; in btrfs_bio_is_contig()
737 struct btrfs_fs_info *fs_info = inode->root->fs_info; in alloc_new_bio()
740 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info, in alloc_new_bio()
741 bio_ctrl->end_io_func, NULL); in alloc_new_bio()
742 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; in alloc_new_bio()
743 bbio->inode = inode; in alloc_new_bio()
744 bbio->file_offset = file_offset; in alloc_new_bio()
745 bio_ctrl->bbio = bbio; in alloc_new_bio()
746 bio_ctrl->len_to_oe_boundary = U32_MAX; in alloc_new_bio()
749 if (bio_ctrl->wbc) { in alloc_new_bio()
754 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX, in alloc_new_bio()
755 ordered->file_offset + in alloc_new_bio()
756 ordered->disk_num_bytes - file_offset); in alloc_new_bio()
757 bbio->ordered = ordered; in alloc_new_bio()
762 * multi-device file systems this means blk-cgroup policies have in alloc_new_bio()
766 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev); in alloc_new_bio()
767 wbc_init_bio(bio_ctrl->wbc, &bbio->bio); in alloc_new_bio()
773 * @page: page to add to the bio
774 * @size: portion of page that we want to write to
775 * @pg_offset: offset of the new bio or to check whether we are adding
776 * a contiguous page to the previous one
778 * This will either add the page into the existing @bio_ctrl->bbio, or allocate a in submit_extent_page()
779 * new one in @bio_ctrl->bbio.
781 * @bio_ctrl->mirror_num.
784 u64 disk_bytenr, struct page *page, in submit_extent_page() argument
787 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in submit_extent_page()
790 ASSERT(bio_ctrl->end_io_func); in submit_extent_page()
792 if (bio_ctrl->bbio && in submit_extent_page()
793 !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset)) in submit_extent_page()
800 if (!bio_ctrl->bbio) { in submit_extent_page()
802 page_offset(page) + pg_offset); in submit_extent_page()
806 if (len > bio_ctrl->len_to_oe_boundary) { in submit_extent_page()
807 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE); in submit_extent_page()
808 ASSERT(is_data_inode(&inode->vfs_inode)); in submit_extent_page()
809 len = bio_ctrl->len_to_oe_boundary; in submit_extent_page()
812 if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) { in submit_extent_page()
818 if (bio_ctrl->wbc) in submit_extent_page()
819 wbc_account_cgroup_owner(bio_ctrl->wbc, page, len); in submit_extent_page()
821 size -= len; in submit_extent_page()
826 * len_to_oe_boundary defaults to U32_MAX, which isn't page or in submit_extent_page()
836 * result in a 4095 byte IO for the last page right before in submit_extent_page()
843 * happen when the page cache is able to feed us contiguous in submit_extent_page()
846 if (bio_ctrl->len_to_oe_boundary != U32_MAX) in submit_extent_page()
847 bio_ctrl->len_to_oe_boundary -= len; in submit_extent_page()
850 if (bio_ctrl->len_to_oe_boundary == 0) in submit_extent_page()
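The len_to_oe_boundary handling above caps every bio at an ordered-extent boundary: the length is clamped, the remaining budget is decremented, and once it reaches zero the current bio is submitted and a fresh one is started. A compact userspace sketch of the same splitting idea (fixed boundary, invented names, no bios involved):

#include <stdint.h>
#include <stdio.h>

static void submit_in_chunks(uint64_t start, uint32_t size, uint32_t boundary)
{
	/* Bytes left until the current boundary. */
	uint32_t to_boundary = boundary - (uint32_t)(start % boundary);

	while (size) {
		uint32_t len = size < to_boundary ? size : to_boundary;

		printf("submit [%llu, +%u)\n", (unsigned long long)start, len);
		start += len;
		size -= len;
		to_boundary -= len;
		if (to_boundary == 0)	/* hit the boundary: start a new chunk */
			to_boundary = boundary;
	}
}

int main(void)
{
	/* A 10 KiB write starting 3 KiB before a 64 KiB boundary is split in two. */
	submit_in_chunks(64 * 1024 - 3 * 1024, 10 * 1024, 64 * 1024);
	return 0;
}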
856 struct page *page, in attach_extent_buffer_page() argument
859 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_page()
863 * If the page is mapped to btree inode, we should hold the private in attach_extent_buffer_page()
868 if (page->mapping) in attach_extent_buffer_page()
869 lockdep_assert_held(&page->mapping->private_lock); in attach_extent_buffer_page()
871 if (fs_info->nodesize >= PAGE_SIZE) { in attach_extent_buffer_page()
872 if (!PagePrivate(page)) in attach_extent_buffer_page()
873 attach_page_private(page, eb); in attach_extent_buffer_page()
875 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
880 if (PagePrivate(page)) { in attach_extent_buffer_page()
887 attach_page_private(page, prealloc); in attach_extent_buffer_page()
890 ret = btrfs_attach_subpage(fs_info, page, in attach_extent_buffer_page()
895 int set_page_extent_mapped(struct page *page) in set_page_extent_mapped() argument
899 ASSERT(page->mapping); in set_page_extent_mapped()
901 if (PagePrivate(page)) in set_page_extent_mapped()
904 fs_info = btrfs_sb(page->mapping->host->i_sb); in set_page_extent_mapped()
906 if (btrfs_is_subpage(fs_info, page)) in set_page_extent_mapped()
907 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA); in set_page_extent_mapped()
909 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE); in set_page_extent_mapped()
913 void clear_page_extent_mapped(struct page *page) in clear_page_extent_mapped() argument
917 ASSERT(page->mapping); in clear_page_extent_mapped()
919 if (!PagePrivate(page)) in clear_page_extent_mapped()
922 fs_info = btrfs_sb(page->mapping->host->i_sb); in clear_page_extent_mapped()
923 if (btrfs_is_subpage(fs_info, page)) in clear_page_extent_mapped()
924 return btrfs_detach_subpage(fs_info, page); in clear_page_extent_mapped()
926 detach_page_private(page); in clear_page_extent_mapped()
930 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, in __get_extent_map() argument
937 if (extent_map_in_tree(em) && start >= em->start && in __get_extent_map()
939 refcount_inc(&em->refs); in __get_extent_map()
947 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len); in __get_extent_map()
950 refcount_inc(&em->refs); in __get_extent_map()
959 * XXX JDM: This needs looking at to ensure proper page locking
962 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, in btrfs_do_readpage() argument
965 struct inode *inode = page->mapping->host; in btrfs_do_readpage()
966 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_do_readpage()
967 u64 start = page_offset(page); in btrfs_do_readpage()
968 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
977 size_t blocksize = fs_info->sectorsize; in btrfs_do_readpage()
978 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in btrfs_do_readpage()
980 ret = set_page_extent_mapped(page); in btrfs_do_readpage()
983 unlock_page(page); in btrfs_do_readpage()
987 if (page->index == last_byte >> PAGE_SHIFT) { in btrfs_do_readpage()
991 iosize = PAGE_SIZE - zero_offset; in btrfs_do_readpage()
992 memzero_page(page, zero_offset, iosize); in btrfs_do_readpage()
995 bio_ctrl->end_io_func = end_bio_extent_readpage; in btrfs_do_readpage()
996 begin_page_read(fs_info, page); in btrfs_do_readpage()
1002 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize)); in btrfs_do_readpage()
1004 iosize = PAGE_SIZE - pg_offset; in btrfs_do_readpage()
1005 memzero_page(page, pg_offset, iosize); in btrfs_do_readpage()
1006 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1007 end_page_read(page, true, cur, iosize); in btrfs_do_readpage()
1010 em = __get_extent_map(inode, page, pg_offset, cur, in btrfs_do_readpage()
1011 end - cur + 1, em_cached); in btrfs_do_readpage()
1014 end_page_read(page, false, cur, end + 1 - cur); in btrfs_do_readpage()
1017 extent_offset = cur - em->start; in btrfs_do_readpage()
1021 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) in btrfs_do_readpage()
1022 compress_type = em->compress_type; in btrfs_do_readpage()
1024 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
1027 disk_bytenr = em->block_start; in btrfs_do_readpage()
1029 disk_bytenr = em->block_start + extent_offset; in btrfs_do_readpage()
1030 block_start = em->block_start; in btrfs_do_readpage()
1031 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_do_readpage()
1038 * offset and/or length, so it either points to the whole extent in btrfs_do_readpage()
1045 * [0 - 8K] [8K - 24K] in btrfs_do_readpage()
1049 * offset 4K, length of 8K offset 0, length 16K in btrfs_do_readpage()
1063 * an extent map with a different offset value relative to the in btrfs_do_readpage()
1066 * non-optimal behavior (submitting 2 bios for the same extent). in btrfs_do_readpage()
1068 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && in btrfs_do_readpage()
1069 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
1070 *prev_em_start != em->start) in btrfs_do_readpage()
1074 *prev_em_start = em->start; in btrfs_do_readpage()
1081 memzero_page(page, pg_offset, iosize); in btrfs_do_readpage()
1083 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1084 end_page_read(page, true, cur, iosize); in btrfs_do_readpage()
1089 /* the get_extent function already copied into the page */ in btrfs_do_readpage()
1091 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1092 end_page_read(page, true, cur, iosize); in btrfs_do_readpage()
1098 if (bio_ctrl->compress_type != compress_type) { in btrfs_do_readpage()
1100 bio_ctrl->compress_type = compress_type; in btrfs_do_readpage()
1105 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize, in btrfs_do_readpage()
1116 struct page *page = &folio->page; in btrfs_read_folio() local
1117 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_read_folio()
1118 u64 start = page_offset(page); in btrfs_read_folio()
1119 u64 end = start + PAGE_SIZE - 1; in btrfs_read_folio()
1125 ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL); in btrfs_read_folio()
1134 static inline void contiguous_readpages(struct page *pages[], int nr_pages, in contiguous_readpages()
1140 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); in contiguous_readpages()
1156 * to write the page (copy into inline extent). In this case the IO has
1157 * been started and the page is already unlocked.
1159 * This returns 0 if all went well (page still locked)
1160 * This returns < 0 if there were errors (page still locked)
1163 struct page *page, struct writeback_control *wbc) in writepage_delalloc() argument
1165 const u64 page_start = page_offset(page); in writepage_delalloc()
1166 const u64 page_end = page_start + PAGE_SIZE - 1; in writepage_delalloc()
1174 if (!find_lock_delalloc_range(&inode->vfs_inode, page, in writepage_delalloc()
1180 ret = btrfs_run_delalloc_range(inode, page, delalloc_start, in writepage_delalloc()
1193 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE); in writepage_delalloc()
1200 wbc->nr_to_write -= delalloc_to_write; in writepage_delalloc()
1204 if (wbc->nr_to_write < delalloc_to_write) { in writepage_delalloc()
1209 wbc->nr_to_write = min_t(u64, delalloc_to_write, in writepage_delalloc()
1219 * For subpage, one page can contain several sectors, and
1220 * __extent_writepage_io() will just grab all extent maps in the page
1221 * range and try to submit all non-inline/non-compressed extents.
1223 * This is a big problem for subpage: we shouldn't re-submit already written in __extent_writepage_io()
1229 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1232 struct page *page, u64 *start, u64 *end) in find_next_dirty_byte() argument
1234 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; in find_next_dirty_byte()
1235 struct btrfs_subpage_info *spi = fs_info->subpage_info; in find_next_dirty_byte()
1243 * For regular sector size == page size case, since one page only in find_next_dirty_byte()
1244 * contains one sector, we return the page offset directly. in find_next_dirty_byte()
1246 if (!btrfs_is_subpage(fs_info, page)) { in find_next_dirty_byte()
1247 *start = page_offset(page); in find_next_dirty_byte()
1248 *end = page_offset(page) + PAGE_SIZE; in find_next_dirty_byte()
1252 range_start_bit = spi->dirty_offset + in find_next_dirty_byte()
1253 (offset_in_page(orig_start) >> fs_info->sectorsize_bits); in find_next_dirty_byte()
1255 /* We should have the page locked, but just in case */ in find_next_dirty_byte()
1256 spin_lock_irqsave(&subpage->lock, flags); in find_next_dirty_byte()
1257 bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit, in find_next_dirty_byte()
1258 spi->dirty_offset + spi->bitmap_nr_bits); in find_next_dirty_byte()
1259 spin_unlock_irqrestore(&subpage->lock, flags); in find_next_dirty_byte()
1261 range_start_bit -= spi->dirty_offset; in find_next_dirty_byte()
1262 range_end_bit -= spi->dirty_offset; in find_next_dirty_byte()
1264 *start = page_offset(page) + range_start_bit * fs_info->sectorsize; in find_next_dirty_byte()
1265 *end = page_offset(page) + range_end_bit * fs_info->sectorsize; in find_next_dirty_byte()
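find_next_dirty_byte() above scans the subpage dirty bitmap: it looks for the next run of set bits starting at the current offset and converts the bit indices back into a byte range using the sector size. The same idea in standalone C (a single 64-bit bitmap and a made-up sector size, not the btrfs_subpage structures):

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE 4096u

/* Find the first run of set bits at or after *first in a 64-bit map.
 * Returns 0 if none, else stores the run as [*first, *last) in bits. */
static int next_set_region(uint64_t map, unsigned int *first, unsigned int *last)
{
	unsigned int i = *first;

	while (i < 64 && !(map & (1ULL << i)))
		i++;
	if (i == 64)
		return 0;
	*first = i;
	while (i < 64 && (map & (1ULL << i)))
		i++;
	*last = i;
	return 1;
}

int main(void)
{
	uint64_t dirty = 0xf0;		/* sectors 4..7 dirty */
	unsigned int first = 0, last;

	if (next_set_region(dirty, &first, &last))
		printf("dirty byte range: [%u, %u)\n",
		       first * SECTORSIZE, last * SECTORSIZE);
	return 0;
}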
1270 * and does the loop to map the page into extents and bios.
1272 * We return 1 if the IO is started and the page is unlocked,
1273 * 0 if all went well (page still locked)
1274 * < 0 if there were errors (page still locked)
1277 struct page *page, in __extent_writepage_io() argument
1282 struct btrfs_fs_info *fs_info = inode->root->fs_info; in __extent_writepage_io()
1283 u64 cur = page_offset(page); in __extent_writepage_io()
1284 u64 end = cur + PAGE_SIZE - 1; in __extent_writepage_io()
1291 ret = btrfs_writepage_cow_fixup(page); in __extent_writepage_io()
1294 redirty_page_for_writepage(bio_ctrl->wbc, page); in __extent_writepage_io()
1295 unlock_page(page); in __extent_writepage_io()
1299 bio_ctrl->end_io_func = end_bio_extent_writepage; in __extent_writepage_io()
1301 u32 len = end - cur + 1; in __extent_writepage_io()
1309 btrfs_mark_ordered_io_finished(inode, page, cur, len, in __extent_writepage_io()
1315 * the next time the page gets dirtied, we will try to in __extent_writepage_io()
1319 btrfs_page_clear_dirty(fs_info, page, cur, len); in __extent_writepage_io()
1323 find_next_dirty_byte(fs_info, page, &dirty_range_start, in __extent_writepage_io()
1336 extent_offset = cur - em->start; in __extent_writepage_io()
1340 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize)); in __extent_writepage_io()
1341 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize)); in __extent_writepage_io()
1343 block_start = em->block_start; in __extent_writepage_io()
1344 disk_bytenr = em->block_start + extent_offset; in __extent_writepage_io()
1346 ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); in __extent_writepage_io()
1354 iosize = min(min(em_end, end + 1), dirty_range_end) - cur; in __extent_writepage_io()
1358 btrfs_set_range_writeback(inode, cur, cur + iosize - 1); in __extent_writepage_io()
1359 if (!PageWriteback(page)) { in __extent_writepage_io()
1360 btrfs_err(inode->root->fs_info, in __extent_writepage_io()
1361 "page %lu not writeback, cur %llu end %llu", in __extent_writepage_io()
1362 page->index, cur, end); in __extent_writepage_io()
1369 * page for range already written to disk. in __extent_writepage_io()
1371 btrfs_page_clear_dirty(fs_info, page, cur, iosize); in __extent_writepage_io()
1373 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize, in __extent_writepage_io()
1374 cur - page_offset(page)); in __extent_writepage_io()
1379 btrfs_page_assert_not_dirty(fs_info, page); in __extent_writepage_io()
1385 * If we finish without problem, we should not only clear page dirty, in __extent_writepage_io()
1401 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl) in __extent_writepage() argument
1403 struct folio *folio = page_folio(page); in __extent_writepage()
1404 struct inode *inode = page->mapping->host; in __extent_writepage()
1405 const u64 page_start = page_offset(page); in __extent_writepage()
1412 trace___extent_writepage(page, inode, bio_ctrl->wbc); in __extent_writepage()
1414 WARN_ON(!PageLocked(page)); in __extent_writepage()
1417 if (page->index > end_index || in __extent_writepage()
1418 (page->index == end_index && !pg_offset)) { in __extent_writepage()
1424 if (page->index == end_index) in __extent_writepage()
1425 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset); in __extent_writepage()
1427 ret = set_page_extent_mapped(page); in __extent_writepage()
1431 ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc); in __extent_writepage()
1437 ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr); in __extent_writepage()
1441 bio_ctrl->wbc->nr_to_write--; in __extent_writepage()
1445 /* make sure the mapping tag for page dirty gets cleared */ in __extent_writepage()
1446 set_page_writeback(page); in __extent_writepage()
1447 end_page_writeback(page); in __extent_writepage()
1450 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start, in __extent_writepage()
1452 mapping_set_error(page->mapping, ret); in __extent_writepage()
1454 unlock_page(page); in __extent_writepage()
1461 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
1475 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
1479 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
1481 if (wbc->sync_mode != WB_SYNC_ALL) in lock_extent_buffer_for_io()
1492 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
1493 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
1494 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
1495 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1497 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, in lock_extent_buffer_for_io()
1498 -eb->len, in lock_extent_buffer_for_io()
1499 fs_info->dirty_metadata_batch); in lock_extent_buffer_for_io()
1502 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1510 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
1512 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in set_btree_ioerr()
1518 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
1526 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); in set_btree_ioerr()
1530 * failed, increment the counter transaction->eb_write_errors. in set_btree_ioerr()
1534 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it in set_btree_ioerr()
1543 * btree nodes/leafs whose content on disk is invalid - either garbage in set_btree_ioerr()
1548 * not be enough - we need to distinguish between log tree extents vs in set_btree_ioerr()
1549 * non-log tree extents, and the next filemap_fdatawait_range() call in set_btree_ioerr()
1550 * will catch and clear such errors in the mapping - and that call might in set_btree_ioerr()
1553 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
1561 * with errors - because we were not using AS_EIO/AS_ENOSPC, in set_btree_ioerr()
1566 switch (eb->log_index) { in set_btree_ioerr()
1567 case -1: in set_btree_ioerr()
1568 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags); in set_btree_ioerr()
1571 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags); in set_btree_ioerr()
1574 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags); in set_btree_ioerr()
1591 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_nolock()
1592 start >> fs_info->sectorsize_bits); in find_extent_buffer_nolock()
1593 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer_nolock()
1603 struct extent_buffer *eb = bbio->private; in extent_buffer_write_end_io()
1604 struct btrfs_fs_info *fs_info = eb->fs_info; in extent_buffer_write_end_io()
1605 bool uptodate = !bbio->bio.bi_status; in extent_buffer_write_end_io()
1613 bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { in extent_buffer_write_end_io()
1614 u64 start = eb->start + bio_offset; in extent_buffer_write_end_io()
1615 struct page *page = bvec->bv_page; in extent_buffer_write_end_io() local
1616 u32 len = bvec->bv_len; in extent_buffer_write_end_io()
1618 btrfs_page_clear_writeback(fs_info, page, start, len); in extent_buffer_write_end_io()
1622 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in extent_buffer_write_end_io()
1624 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in extent_buffer_write_end_io()
1626 bio_put(&bbio->bio); in extent_buffer_write_end_io()
1635 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
1641 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
1650 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); in prepare_eb_write()
1652 end += btrfs_item_offset(eb, nritems - 1); in prepare_eb_write()
1653 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
1660 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_eb()
1667 eb->fs_info, extent_buffer_write_end_io, eb); in write_one_eb()
1668 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in write_one_eb()
1669 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev); in write_one_eb()
1670 wbc_init_bio(wbc, &bbio->bio); in write_one_eb()
1671 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in write_one_eb()
1672 bbio->file_offset = eb->start; in write_one_eb()
1673 if (fs_info->nodesize < PAGE_SIZE) { in write_one_eb()
1674 struct page *p = eb->pages[0]; in write_one_eb()
1677 btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len); in write_one_eb()
1678 if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start, in write_one_eb()
1679 eb->len)) { in write_one_eb()
1681 wbc->nr_to_write--; in write_one_eb()
1683 __bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p)); in write_one_eb()
1684 wbc_account_cgroup_owner(wbc, p, eb->len); in write_one_eb()
1688 struct page *p = eb->pages[i]; in write_one_eb()
1693 __bio_add_page(&bbio->bio, p, PAGE_SIZE, 0); in write_one_eb()
1695 wbc->nr_to_write--; in write_one_eb()
1703 * Submit one subpage btree page.
1706 * - Page locking
1707 * For subpage, we don't rely on page locking at all.
1709 * - Flush write bio
1716 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) in submit_eb_subpage() argument
1718 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); in submit_eb_subpage()
1720 u64 page_start = page_offset(page); in submit_eb_subpage()
1722 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits; in submit_eb_subpage()
1725 while (bit_start < fs_info->subpage_info->bitmap_nr_bits) { in submit_eb_subpage()
1726 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; in submit_eb_subpage()
1735 spin_lock(&page->mapping->private_lock); in submit_eb_subpage()
1736 if (!PagePrivate(page)) { in submit_eb_subpage()
1737 spin_unlock(&page->mapping->private_lock); in submit_eb_subpage()
1740 spin_lock_irqsave(&subpage->lock, flags); in submit_eb_subpage()
1741 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset, in submit_eb_subpage()
1742 subpage->bitmaps)) { in submit_eb_subpage()
1743 spin_unlock_irqrestore(&subpage->lock, flags); in submit_eb_subpage()
1744 spin_unlock(&page->mapping->private_lock); in submit_eb_subpage()
1749 start = page_start + bit_start * fs_info->sectorsize; in submit_eb_subpage()
1757 spin_unlock_irqrestore(&subpage->lock, flags); in submit_eb_subpage()
1758 spin_unlock(&page->mapping->private_lock); in submit_eb_subpage()
1778 * Submit all page(s) of one extent buffer.
1780 * @page: the page of one extent buffer
1781 * @eb_context: to determine if we need to submit this page, if current page
1784 * The caller should pass each page in their bytenr order, and here we use
1787 * If we have, we just skip until we hit a new page that doesn't belong to
1790 * If not, we submit all the page(s) of the extent buffer.
1793 * Return 0 if we don't need to submit the page, as it's already submitted by
1797 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx) in submit_eb_page() argument
1799 struct writeback_control *wbc = ctx->wbc; in submit_eb_page()
1800 struct address_space *mapping = page->mapping; in submit_eb_page()
1804 if (!PagePrivate(page)) in submit_eb_page()
1807 if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE) in submit_eb_page()
1808 return submit_eb_subpage(page, wbc); in submit_eb_page()
1810 spin_lock(&mapping->private_lock); in submit_eb_page()
1811 if (!PagePrivate(page)) { in submit_eb_page()
1812 spin_unlock(&mapping->private_lock); in submit_eb_page()
1816 eb = (struct extent_buffer *)page->private; in submit_eb_page()
1823 spin_unlock(&mapping->private_lock); in submit_eb_page()
1827 if (eb == ctx->eb) { in submit_eb_page()
1828 spin_unlock(&mapping->private_lock); in submit_eb_page()
1831 ret = atomic_inc_not_zero(&eb->refs); in submit_eb_page()
1832 spin_unlock(&mapping->private_lock); in submit_eb_page()
1836 ctx->eb = eb; in submit_eb_page()
1838 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx); in submit_eb_page()
1840 if (ret == -EBUSY) in submit_eb_page()
1851 if (ctx->zoned_bg) { in submit_eb_page()
1853 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb); in submit_eb_page()
1854 ctx->zoned_bg->meta_write_pointer += eb->len; in submit_eb_page()
1865 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; in btree_write_cache_pages()
1877 if (wbc->range_cyclic) { in btree_write_cache_pages()
1878 index = mapping->writeback_index; /* Start from prev offset */ in btree_write_cache_pages()
1879 end = -1; in btree_write_cache_pages()
1886 index = wbc->range_start >> PAGE_SHIFT; in btree_write_cache_pages()
1887 end = wbc->range_end >> PAGE_SHIFT; in btree_write_cache_pages()
1890 if (wbc->sync_mode == WB_SYNC_ALL) in btree_write_cache_pages()
1896 if (wbc->sync_mode == WB_SYNC_ALL) in btree_write_cache_pages()
1906 ret = submit_eb_page(&folio->page, &ctx); in btree_write_cache_pages()
1919 nr_to_write_done = wbc->nr_to_write <= 0; in btree_write_cache_pages()
1926 * We hit the last page and there is more work to be done: wrap in btree_write_cache_pages()
1937 * This would prevent use-after-free if we had dirty pages not in btree_write_cache_pages()
1940 * - Bad extent tree in btree_write_cache_pages()
1943 * - Log tree operations in btree_write_cache_pages()
1945 * generation, then get cleaned in tree re-balance. in btree_write_cache_pages()
1951 * - Offending tree block gets re-dirtied from its original owner in btree_write_cache_pages()
1966 ret = -EROFS; in btree_write_cache_pages()
1978 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1981 * If a page is already under I/O, write_cache_pages() skips it, even
1982 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1983 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1985 * the call was made get new I/O started against them. If wbc->sync_mode is
1992 struct writeback_control *wbc = bio_ctrl->wbc; in extent_write_cache_pages()
1993 struct inode *inode = mapping->host; in extent_write_cache_pages()
2019 if (wbc->range_cyclic) { in extent_write_cache_pages()
2020 index = mapping->writeback_index; /* Start from prev offset */ in extent_write_cache_pages()
2021 end = -1; in extent_write_cache_pages()
2028 index = wbc->range_start >> PAGE_SHIFT; in extent_write_cache_pages()
2029 end = wbc->range_end >> PAGE_SHIFT; in extent_write_cache_pages()
2030 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in extent_write_cache_pages()
2042 if (range_whole && wbc->nr_to_write == LONG_MAX && in extent_write_cache_pages()
2044 &BTRFS_I(inode)->runtime_flags)) in extent_write_cache_pages()
2045 wbc->tagged_writepages = 1; in extent_write_cache_pages()
2047 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in extent_write_cache_pages()
2052 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in extent_write_cache_pages()
2066 * the page lock: the page may be truncated or in extent_write_cache_pages()
2067 * invalidated (changing page->mapping to NULL), in extent_write_cache_pages()
2076 if (unlikely(folio->mapping != mapping)) { in extent_write_cache_pages()
2087 if (wbc->sync_mode != WB_SYNC_NONE) { in extent_write_cache_pages()
2099 ret = __extent_writepage(&folio->page, bio_ctrl); in extent_write_cache_pages()
2110 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && in extent_write_cache_pages()
2111 wbc->nr_to_write <= 0); in extent_write_cache_pages()
2118 * We hit the last page and there is more work to be done: wrap in extent_write_cache_pages()
2125 * If we're looping we could run into a page that is locked by a in extent_write_cache_pages()
2127 * page in our current bio, and thus deadlock, so flush the in extent_write_cache_pages()
2134 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) in extent_write_cache_pages()
2135 mapping->writeback_index = done_index; in extent_write_cache_pages()
2146 void extent_write_locked_range(struct inode *inode, struct page *locked_page, in extent_write_locked_range()
2152 struct address_space *mapping = inode->i_mapping; in extent_write_locked_range()
2153 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in extent_write_locked_range()
2154 const u32 sectorsize = fs_info->sectorsize; in extent_write_locked_range()
2162 if (wbc->no_cgroup_owner) in extent_write_locked_range()
2168 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); in extent_write_locked_range()
2169 u32 cur_len = cur_end + 1 - cur; in extent_write_locked_range()
2170 struct page *page; in extent_write_locked_range() local
2173 page = find_get_page(mapping, cur >> PAGE_SHIFT); in extent_write_locked_range()
2174 ASSERT(PageLocked(page)); in extent_write_locked_range()
2175 if (pages_dirty && page != locked_page) in extent_write_locked_range()
2176 ASSERT(PageDirty(page)); in extent_write_locked_range()
2178 ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl, in extent_write_locked_range()
2183 /* Make sure the mapping tag for page dirty gets cleared. */ in extent_write_locked_range()
2185 set_page_writeback(page); in extent_write_locked_range()
2186 end_page_writeback(page); in extent_write_locked_range()
2189 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, in extent_write_locked_range()
2191 mapping_set_error(page->mapping, ret); in extent_write_locked_range()
2193 btrfs_page_unlock_writer(fs_info, page, cur, cur_len); in extent_write_locked_range()
2197 put_page(page); in extent_write_locked_range()
2207 struct inode *inode = mapping->host; in extent_writepages()
2228 struct page *pagepool[16]; in extent_readahead()
2230 u64 prev_em_start = (u64)-1; in extent_readahead()
2235 u64 contig_end = contig_start + readahead_batch_length(rac) - 1; in extent_readahead()
2252 struct folio *folio, size_t offset) in extent_invalidate_folio() argument
2256 u64 end = start + folio_size(folio) - 1; in extent_invalidate_folio()
2257 size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize; in extent_invalidate_folio()
2260 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO); in extent_invalidate_folio()
2262 start += ALIGN(offset, blocksize); in extent_invalidate_folio()
2279 * a helper for release_folio, this tests for areas of the page that
2281 * to drop the page.
2284 struct page *page, gfp_t mask) in try_release_extent_state() argument
2286 u64 start = page_offset(page); in try_release_extent_state()
2287 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
2318 * in the range corresponding to the page, both state records and extent
2321 int try_release_extent_mapping(struct page *page, gfp_t mask) in try_release_extent_mapping() argument
2324 u64 start = page_offset(page); in try_release_extent_mapping()
2325 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
2326 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); in try_release_extent_mapping()
2327 struct extent_io_tree *tree = &btrfs_inode->io_tree; in try_release_extent_mapping()
2328 struct extent_map_tree *map = &btrfs_inode->extent_tree; in try_release_extent_mapping()
2331 page->mapping->host->i_size > SZ_16M) { in try_release_extent_mapping()
2337 len = end - start + 1; in try_release_extent_mapping()
2338 write_lock(&map->lock); in try_release_extent_mapping()
2341 write_unlock(&map->lock); in try_release_extent_mapping()
2344 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) || in try_release_extent_mapping()
2345 em->start != start) { in try_release_extent_mapping()
2346 write_unlock(&map->lock); in try_release_extent_mapping()
2350 if (test_range_bit(tree, em->start, in try_release_extent_mapping()
2351 extent_map_end(em) - 1, in try_release_extent_mapping()
2360 if (list_empty(&em->list) || in try_release_extent_mapping()
2361 test_bit(EXTENT_FLAG_LOGGING, &em->flags)) in try_release_extent_mapping()
2370 fs_info = btrfs_inode->root->fs_info; in try_release_extent_mapping()
2371 spin_lock(&fs_info->trans_lock); in try_release_extent_mapping()
2372 cur_gen = fs_info->generation; in try_release_extent_mapping()
2373 spin_unlock(&fs_info->trans_lock); in try_release_extent_mapping()
2374 if (em->generation >= cur_gen) in try_release_extent_mapping()
2390 write_unlock(&map->lock); in try_release_extent_mapping()
2395 cond_resched(); /* Allow large-extent preemption. */ in try_release_extent_mapping()
2398 return try_release_extent_state(tree, page, mask); in try_release_extent_mapping()
2402 u64 offset; member
2417 #define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
2422 * - Cache the next entry to be emitted to the fiemap buffer, so that we can
2425 * - Store extents ready to be written to the fiemap buffer in an intermediary
2444 * Once the entries array is full, this indicates what's the offset for
2460 u64 offset; member
2470 for (int i = 0; i < cache->entries_pos; i++) { in flush_fiemap_cache()
2471 struct btrfs_fiemap_entry *entry = &cache->entries[i]; in flush_fiemap_cache()
2474 ret = fiemap_fill_next_extent(fieinfo, entry->offset, in flush_fiemap_cache()
2475 entry->phys, entry->len, in flush_fiemap_cache()
2476 entry->flags); in flush_fiemap_cache()
2484 cache->entries_pos = 0; in flush_fiemap_cache()
2492 * Will try to merge current fiemap extent specified by @offset, @phys,
2501 u64 offset, u64 phys, u64 len, u32 flags) in emit_fiemap_extent() argument
2509 if (!cache->cached) in emit_fiemap_extent()
2514 * find an extent that starts at an offset behind the end offset of the in emit_fiemap_extent()
2518 * the fiemap extents stored in the buffer (cache->entries array) and in emit_fiemap_extent()
2519 * the lock the remainder of the range and re-search the btree. in emit_fiemap_extent()
2528 * the first item of the next leaf - in either case btrfs_next_leaf() in emit_fiemap_extent()
2536 * offset smaller than or equals to cache->offset, and this happens in emit_fiemap_extent()
2540 * now have found a file extent item for an offset that is smaller than in emit_fiemap_extent()
2541 * or equals to what we have in cache->offset. We deal with this as in emit_fiemap_extent()
2544 cache_end = cache->offset + cache->len; in emit_fiemap_extent()
2545 if (cache_end > offset) { in emit_fiemap_extent()
2546 if (offset == cache->offset) { in emit_fiemap_extent()
2550 * file extent item for the same offset. What we have in emit_fiemap_extent()
2555 } else if (offset > cache->offset) { in emit_fiemap_extent()
2558 * offset of the file extent item we found and that in emit_fiemap_extent()
2559 * offset falls somewhere in the middle of that previous in emit_fiemap_extent()
2561 * to end at the offset of the file extent item we have in emit_fiemap_extent()
2568 cache->len = offset - cache->offset; in emit_fiemap_extent()
2571 const u64 range_end = offset + len; in emit_fiemap_extent()
2574 * The offset of the file extent item we have just found in emit_fiemap_extent()
2575 * is behind the cached offset. This means we were in emit_fiemap_extent()
2592 * cached extent but its end offset goes beyond the in emit_fiemap_extent()
2593 * end offset of the cached extent. We don't want to in emit_fiemap_extent()
2605 phys += cache_end - offset; in emit_fiemap_extent()
2607 offset = cache_end; in emit_fiemap_extent()
2608 len = range_end - cache_end; in emit_fiemap_extent()
2623 if (cache->offset + cache->len == offset && in emit_fiemap_extent()
2624 cache->phys + cache->len == phys && in emit_fiemap_extent()
2625 cache->flags == flags) { in emit_fiemap_extent()
2626 cache->len += len; in emit_fiemap_extent()
2633 if (cache->entries_pos == cache->entries_size) { in emit_fiemap_extent()
2635 * We will need to re-search for the end offset of the last in emit_fiemap_extent()
2636 * stored extent and not from the current offset, because after in emit_fiemap_extent()
2638 * between that end offset and this current offset, a new extent in emit_fiemap_extent()
2642 entry = &cache->entries[cache->entries_size - 1]; in emit_fiemap_extent()
2643 cache->next_search_offset = entry->offset + entry->len; in emit_fiemap_extent()
2644 cache->cached = false; in emit_fiemap_extent()
2649 entry = &cache->entries[cache->entries_pos]; in emit_fiemap_extent()
2650 entry->offset = cache->offset; in emit_fiemap_extent()
2651 entry->phys = cache->phys; in emit_fiemap_extent()
2652 entry->len = cache->len; in emit_fiemap_extent()
2653 entry->flags = cache->flags; in emit_fiemap_extent()
2654 cache->entries_pos++; in emit_fiemap_extent()
2655 cache->extents_mapped++; in emit_fiemap_extent()
2657 if (cache->extents_mapped == fieinfo->fi_extents_max) { in emit_fiemap_extent()
2658 cache->cached = false; in emit_fiemap_extent()
2662 cache->cached = true; in emit_fiemap_extent()
2663 cache->offset = offset; in emit_fiemap_extent()
2664 cache->phys = phys; in emit_fiemap_extent()
2665 cache->len = len; in emit_fiemap_extent()
2666 cache->flags = flags; in emit_fiemap_extent()
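emit_fiemap_extent() above only merges a newly found extent into the cached one when it is contiguous in both file offset and physical address and carries identical flags; otherwise the new extent replaces it as the cached entry. A small userspace sketch of that merge test (simplified structure, not the kernel's fiemap_cache):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cached_extent {
	bool cached;
	uint64_t offset, phys, len;
	uint32_t flags;
};

/* Returns true if the new extent was folded into *c. */
static bool try_merge(struct cached_extent *c, uint64_t offset,
		      uint64_t phys, uint64_t len, uint32_t flags)
{
	if (c->cached &&
	    c->offset + c->len == offset &&
	    c->phys + c->len == phys &&
	    c->flags == flags) {
		c->len += len;
		return true;
	}
	return false;
}

int main(void)
{
	struct cached_extent c = { true, 0, 1 << 20, 4096, 0 };

	/* Logically and physically adjacent, same flags: merged. */
	printf("%d\n", try_merge(&c, 4096, (1 << 20) + 4096, 8192, 0));	/* 1 */
	/* Physically discontiguous: must become a separate entry. */
	printf("%d\n", try_merge(&c, 12288, 5 << 20, 4096, 0));		/* 0 */
	return 0;
}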
2676 * |<- Fiemap range ->|
2677 * |<------------ First extent ----------->|
2687 if (!cache->cached) in emit_last_fiemap_cache()
2690 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, in emit_last_fiemap_cache()
2691 cache->len, cache->flags); in emit_last_fiemap_cache()
2692 cache->cached = false; in emit_last_fiemap_cache()
2705 path->slots[0]++; in fiemap_next_leaf_item()
2706 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) in fiemap_next_leaf_item()
2709 ret = btrfs_next_leaf(inode->root, path); in fiemap_next_leaf_item()
2717 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in fiemap_next_leaf_item()
2722 clone = btrfs_clone_extent_buffer(path->nodes[0]); in fiemap_next_leaf_item()
2724 return -ENOMEM; in fiemap_next_leaf_item()
2726 slot = path->slots[0]; in fiemap_next_leaf_item()
2728 path->nodes[0] = clone; in fiemap_next_leaf_item()
2729 path->slots[0] = slot; in fiemap_next_leaf_item()
2735 * Search for the first file extent item that starts at a given file offset or
2736 * the one that starts immediately before that offset.
2743 struct btrfs_root *root = inode->root; in fiemap_search_slot()
2751 key.offset = file_offset; in fiemap_search_slot()
2757 if (ret > 0 && path->slots[0] > 0) { in fiemap_search_slot()
2758 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); in fiemap_search_slot()
2760 path->slots[0]--; in fiemap_search_slot()
2763 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { in fiemap_search_slot()
2768 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in fiemap_search_slot()
2789 clone = btrfs_clone_extent_buffer(path->nodes[0]); in fiemap_search_slot()
2791 return -ENOMEM; in fiemap_search_slot()
2793 slot = path->slots[0]; in fiemap_search_slot()
2795 path->nodes[0] = clone; in fiemap_search_slot()
2796 path->slots[0] = slot; in fiemap_search_slot()
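fiemap_search_slot() above wants the file extent item that starts at the given file offset or, if there is none, the one immediately before it, which is why it steps path->slots[0] back by one after a non-exact btrfs_search_slot() result. Over a sorted array instead of a btree leaf, the equivalent at-or-before lookup is just a predecessor binary search (illustrative C, invented names):

#include <stdint.h>
#include <stdio.h>

/* Return the index of the last start <= offset, or -1 if none. */
static int find_at_or_before(const uint64_t *starts, int n, uint64_t offset)
{
	int lo = 0, hi = n - 1, ans = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (starts[mid] <= offset) {
			ans = mid;
			lo = mid + 1;
		} else {
			hi = mid - 1;
		}
	}
	return ans;
}

int main(void)
{
	const uint64_t starts[] = { 0, 4096, 65536, 1048576 };

	printf("%d\n", find_at_or_before(starts, 4, 70000));	/* 2 (65536) */
	printf("%d\n", find_at_or_before(starts, 4, 4096));	/* 1 (exact match) */
	return 0;
}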
2804 * extent. The end offset (@end) is inclusive.
2815 const u64 i_size = i_size_read(&inode->vfs_inode); in fiemap_process_hole()
2847 prealloc_len = delalloc_start - start; in fiemap_process_hole()
2850 prealloc_len = delalloc_start - prealloc_start; in fiemap_process_hole()
2855 if (!checked_extent_shared && fieinfo->fi_extents_max) { in fiemap_process_hole()
2876 delalloc_end + 1 - delalloc_start, in fiemap_process_hole()
2884 extent_offset += cur_offset - delalloc_start; in fiemap_process_hole()
2898 prealloc_len = end + 1 - start; in fiemap_process_hole()
2901 prealloc_len = end + 1 - prealloc_start; in fiemap_process_hole()
2904 if (!checked_extent_shared && fieinfo->fi_extents_max) { in fiemap_process_hole()
2929 struct btrfs_root *root = inode->root; in fiemap_find_last_extent_offset()
2940 ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0); in fiemap_find_last_extent_offset()
2941 /* There can't be a file extent item at offset (u64)-1 */ in fiemap_find_last_extent_offset()
2947 * For a non-existing key, btrfs_search_slot() always leaves us at a in fiemap_find_last_extent_offset()
2952 ASSERT(path->slots[0] > 0); in fiemap_find_last_extent_offset()
2953 path->slots[0]--; in fiemap_find_last_extent_offset()
2954 leaf = path->nodes[0]; in fiemap_find_last_extent_offset()
2955 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in fiemap_find_last_extent_offset()
2967 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in fiemap_find_last_extent_offset()
2990 leaf = path->nodes[0]; in fiemap_find_last_extent_offset()
2991 ei = btrfs_item_ptr(leaf, path->slots[0], in fiemap_find_last_extent_offset()
3013 const u64 sectorsize = inode->root->fs_info->sectorsize; in extent_fiemap()
3024 ret = -ENOMEM; in extent_fiemap()
3033 lock_extent(&inode->io_tree, range_start, range_end, &cached_state); in extent_fiemap()
3040 path->reada = READA_FORWARD; in extent_fiemap()
3047 * the current offset and i_size. So check for that. in extent_fiemap()
3054 struct extent_buffer *leaf = path->nodes[0]; in extent_fiemap()
3066 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in extent_fiemap()
3079 backref_ctx->curr_leaf_bytenr = leaf->start; in extent_fiemap()
3082 if (prev_extent_end < key.offset) { in extent_fiemap()
3083 const u64 hole_end = min(key.offset, range_end) - 1; in extent_fiemap()
3098 if (key.offset >= range_end) { in extent_fiemap()
3104 extent_len = extent_end - key.offset; in extent_fiemap()
3105 ei = btrfs_item_ptr(leaf, path->slots[0], in extent_fiemap()
3123 ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0, in extent_fiemap()
3130 extent_gen, key.offset, in extent_fiemap()
3131 extent_end - 1); in extent_fiemap()
3137 key.offset, extent_end - 1); in extent_fiemap()
3140 if (fieinfo->fi_extents_max) { in extent_fiemap()
3151 ret = emit_fiemap_extent(fieinfo, &cache, key.offset, in extent_fiemap()
3167 ret = -EINTR; in extent_fiemap()
3185 0, 0, 0, prev_extent_end, range_end - 1); in extent_fiemap()
3191 if (cache.cached && cache.offset + cache.len >= last_extent_end) { in extent_fiemap()
3192 const u64 i_size = i_size_read(&inode->vfs_inode); in extent_fiemap()
3201 i_size - 1, in extent_fiemap()
3213 unlock_extent(&inode->io_tree, range_start, range_end, &cached_state); in extent_fiemap()
3220 len -= cache.next_search_offset - start; in extent_fiemap()
3229 * may have a non-cloned leaf and if the fiemap buffer is memory mapped in extent_fiemap()
3257 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
3258 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
3261 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page) in page_range_has_eb() argument
3265 lockdep_assert_held(&page->mapping->private_lock); in page_range_has_eb()
3267 if (PagePrivate(page)) { in page_range_has_eb()
3268 subpage = (struct btrfs_subpage *)page->private; in page_range_has_eb()
3269 if (atomic_read(&subpage->eb_refs)) in page_range_has_eb()
3273 * end_page_read() call relying on page::private. in page_range_has_eb()
3275 if (atomic_read(&subpage->readers)) in page_range_has_eb()
3281 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page) in detach_extent_buffer_page() argument
3283 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_page()
3284 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_page()
3287 * For mapped eb, we're going to change the page private, which should in detach_extent_buffer_page()
3291 spin_lock(&page->mapping->private_lock); in detach_extent_buffer_page()
3293 if (!PagePrivate(page)) { in detach_extent_buffer_page()
3295 spin_unlock(&page->mapping->private_lock); in detach_extent_buffer_page()
3299 if (fs_info->nodesize >= PAGE_SIZE) { in detach_extent_buffer_page()
3303 * and have this page now attached to the new eb. So in detach_extent_buffer_page()
3307 if (PagePrivate(page) && in detach_extent_buffer_page()
3308 page->private == (unsigned long)eb) { in detach_extent_buffer_page()
3309 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_page()
3310 BUG_ON(PageDirty(page)); in detach_extent_buffer_page()
3311 BUG_ON(PageWriteback(page)); in detach_extent_buffer_page()
3316 detach_page_private(page); in detach_extent_buffer_page()
3319 spin_unlock(&page->mapping->private_lock); in detach_extent_buffer_page()
3324 * For subpage, we can have dummy eb with page private. In this case, in detach_extent_buffer_page()
3325 * we can directly detach the private as such page is only attached to in detach_extent_buffer_page()
3329 btrfs_detach_subpage(fs_info, page); in detach_extent_buffer_page()
3333 btrfs_page_dec_eb_refs(fs_info, page); in detach_extent_buffer_page()
3336 * We can only detach the page private if there are no other ebs in the in detach_extent_buffer_page()
3337 * page range and no unfinished IO. in detach_extent_buffer_page()
3339 if (!page_range_has_eb(fs_info, page)) in detach_extent_buffer_page()
3340 btrfs_detach_subpage(fs_info, page); in detach_extent_buffer_page()
3342 spin_unlock(&page->mapping->private_lock); in detach_extent_buffer_page()
3355 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages() local
3357 if (!page) in btrfs_release_extent_buffer_pages()
3360 detach_extent_buffer_page(eb, page); in btrfs_release_extent_buffer_pages()
3362 /* One for when we allocated the page */ in btrfs_release_extent_buffer_pages()
3363 put_page(page); in btrfs_release_extent_buffer_pages()
3384 eb->start = start; in __alloc_extent_buffer()
3385 eb->len = len; in __alloc_extent_buffer()
3386 eb->fs_info = fs_info; in __alloc_extent_buffer()
3387 init_rwsem(&eb->lock); in __alloc_extent_buffer()
3391 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
3392 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
3406 new = __alloc_extent_buffer(src->fs_info, src->start, src->len); in btrfs_clone_extent_buffer()
3415 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags); in btrfs_clone_extent_buffer()
3417 ret = btrfs_alloc_page_array(num_pages, new->pages); in btrfs_clone_extent_buffer()
3425 struct page *p = new->pages[i]; in btrfs_clone_extent_buffer()
3453 ret = btrfs_alloc_page_array(num_pages, eb->pages); in __alloc_dummy_extent_buffer()
3458 struct page *p = eb->pages[i]; in __alloc_dummy_extent_buffer()
3467 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
3472 if (eb->pages[i]) { in __alloc_dummy_extent_buffer()
3473 detach_extent_buffer_page(eb, eb->pages[i]); in __alloc_dummy_extent_buffer()
3474 __free_page(eb->pages[i]); in __alloc_dummy_extent_buffer()
3484 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize); in alloc_dummy_extent_buffer()
3495 * It is only cleared in two cases: freeing the last non-tree in check_buffer_tree_ref()
3513 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
3514 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3517 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
3518 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3519 atomic_inc(&eb->refs); in check_buffer_tree_ref()
3520 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
3524 struct page *accessed) in mark_extent_buffer_accessed()
3532 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
3551 * set, eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
3560 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
3561 spin_lock(&eb->refs_lock); in find_extent_buffer()
3562 spin_unlock(&eb->refs_lock); in find_extent_buffer()
3580 return ERR_PTR(-ENOMEM); in alloc_test_extent_buffer()
3581 eb->fs_info = fs_info; in alloc_test_extent_buffer()
3588 spin_lock(&fs_info->buffer_lock); in alloc_test_extent_buffer()
3589 ret = radix_tree_insert(&fs_info->buffer_radix, in alloc_test_extent_buffer()
3590 start >> fs_info->sectorsize_bits, eb); in alloc_test_extent_buffer()
3591 spin_unlock(&fs_info->buffer_lock); in alloc_test_extent_buffer()
3593 if (ret == -EEXIST) { in alloc_test_extent_buffer()
3601 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
3611 struct btrfs_fs_info *fs_info, struct page *page) in grab_extent_buffer() argument
3620 if (fs_info->nodesize < PAGE_SIZE) in grab_extent_buffer()
3623 /* Page not yet attached to an extent buffer */ in grab_extent_buffer()
3624 if (!PagePrivate(page)) in grab_extent_buffer()
3628 * We could have already allocated an eb for this page and attached one in grab_extent_buffer()
3631 * just overwrite page->private. in grab_extent_buffer()
3633 exists = (struct extent_buffer *)page->private; in grab_extent_buffer()
3634 if (atomic_inc_not_zero(&exists->refs)) in grab_extent_buffer()
3637 WARN_ON(PageDirty(page)); in grab_extent_buffer()
3638 detach_page_private(page); in grab_extent_buffer()
3644 if (!IS_ALIGNED(start, fs_info->sectorsize)) { in check_eb_alignment()
3646 return -EINVAL; in check_eb_alignment()
3649 if (fs_info->nodesize < PAGE_SIZE && in check_eb_alignment()
3650 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) { in check_eb_alignment()
3652 "tree block crosses page boundary, start %llu nodesize %u", in check_eb_alignment()
3653 start, fs_info->nodesize); in check_eb_alignment()
3654 return -EINVAL; in check_eb_alignment()
3656 if (fs_info->nodesize >= PAGE_SIZE && in check_eb_alignment()
3659 "tree block is not page aligned, start %llu nodesize %u", in check_eb_alignment()
3660 start, fs_info->nodesize); in check_eb_alignment()
3661 return -EINVAL; in check_eb_alignment()
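check_eb_alignment() enforces the three constraints visible above: the start must be sectorsize-aligned, a subpage tree block (nodesize < PAGE_SIZE) must not cross a page boundary, and a regular tree block (nodesize >= PAGE_SIZE) must be page aligned. A small standalone sketch of those checks, assuming a 64 KiB page to model the subpage case and expanding IS_ALIGNED()/offset_in_page() to the plain arithmetic they stand for:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		65536u	/* 64 KiB page, e.g. some arm64/ppc64 configs */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define offset_in_page(x)	((uint32_t)((x) & (PAGE_SIZE - 1)))

/* Mirror of the three alignment checks in check_eb_alignment(). */
static int check_eb_alignment(uint64_t start, uint32_t sectorsize,
			      uint32_t nodesize)
{
	if (!IS_ALIGNED(start, sectorsize)) {
		fprintf(stderr, "unaligned tree block start %llu\n",
			(unsigned long long)start);
		return -1;
	}
	if (nodesize < PAGE_SIZE &&
	    offset_in_page(start) + nodesize > PAGE_SIZE) {
		fprintf(stderr, "tree block crosses page boundary\n");
		return -1;
	}
	if (nodesize >= PAGE_SIZE && !IS_ALIGNED(start, PAGE_SIZE)) {
		fprintf(stderr, "tree block is not page aligned\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* 16K node fully inside one 64K page: accepted. */
	printf("%d\n", check_eb_alignment(16384, 4096, 16384));
	/* 16K node starting 60K into the page would cross it: rejected. */
	printf("%d\n", check_eb_alignment(61440, 4096, 16384));
	return 0;
}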
3669 unsigned long len = fs_info->nodesize; in alloc_extent_buffer()
3675 struct page *p; in alloc_extent_buffer()
3676 struct address_space *mapping = fs_info->btree_inode->i_mapping; in alloc_extent_buffer()
3683 return ERR_PTR(-EINVAL); in alloc_extent_buffer()
3688 "extent buffer %llu is beyond 32bit page cache limit", start); in alloc_extent_buffer()
3690 return ERR_PTR(-EOVERFLOW); in alloc_extent_buffer()
3702 return ERR_PTR(-ENOMEM); in alloc_extent_buffer()
3716 * Preallocate page->private for subpage case, so that we won't in alloc_extent_buffer()
3717		 * allocate memory with private_lock or the page lock held. in alloc_extent_buffer()
3722 if (fs_info->nodesize < PAGE_SIZE) { in alloc_extent_buffer()
3733 exists = ERR_PTR(-ENOMEM); in alloc_extent_buffer()
3738 spin_lock(&mapping->private_lock); in alloc_extent_buffer()
3741 spin_unlock(&mapping->private_lock); in alloc_extent_buffer()
3753 * detach_extent_buffer_page() won't release the page private in alloc_extent_buffer()
3756		 * The ref will be decreased when the eb releases the page, in in alloc_extent_buffer()
3761 spin_unlock(&mapping->private_lock); in alloc_extent_buffer()
3763 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len)); in alloc_extent_buffer()
3764 eb->pages[i] = p; in alloc_extent_buffer()
3765 if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len)) in alloc_extent_buffer()
3771 * opens a race with btree_release_folio which can free a page in alloc_extent_buffer()
3777 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
3785 spin_lock(&fs_info->buffer_lock); in alloc_extent_buffer()
3786 ret = radix_tree_insert(&fs_info->buffer_radix, in alloc_extent_buffer()
3787 start >> fs_info->sectorsize_bits, eb); in alloc_extent_buffer()
3788 spin_unlock(&fs_info->buffer_lock); in alloc_extent_buffer()
3790 if (ret == -EEXIST) { in alloc_extent_buffer()
3799 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
3803 * btree_release_folio will correctly detect that a page belongs to a in alloc_extent_buffer()
3807 unlock_page(eb->pages[i]); in alloc_extent_buffer()
3811 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
3813 if (eb->pages[i]) in alloc_extent_buffer()
3814 unlock_page(eb->pages[i]); in alloc_extent_buffer()
3830 __releases(&eb->refs_lock) in release_extent_buffer()
3832 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
3834 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
3835 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
3836 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
3837 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
3839 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3841 spin_lock(&fs_info->buffer_lock); in release_extent_buffer()
3842 radix_tree_delete(&fs_info->buffer_radix, in release_extent_buffer()
3843 eb->start >> fs_info->sectorsize_bits); in release_extent_buffer()
3844 spin_unlock(&fs_info->buffer_lock); in release_extent_buffer()
3846 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3853 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
3858 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
3861 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3872 refs = atomic_read(&eb->refs); in free_extent_buffer()
3874 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
3875 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
3878 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) in free_extent_buffer()
3882 spin_lock(&eb->refs_lock); in free_extent_buffer()
3883 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
3884 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
3886 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
3887 atomic_dec(&eb->refs); in free_extent_buffer()
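The free_extent_buffer() lines show the lockless fast path: while the reference count stays above a floor (3 for a mapped eb; a lower floor for an unmapped one), it is dropped with atomic_try_cmpxchg(); otherwise eb->refs_lock is taken and the slow path runs. A minimal sketch of that decrement-above-a-floor idiom with C11 atomics; put_ref_fast() is a hypothetical name and the locked slow path is left out.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Drop one reference only while the count stays above @floor.  Below
 * the floor the caller has to fall back to a locked slow path.
 */
static bool put_ref_fast(atomic_int *refs, int floor)
{
	int cur = atomic_load(refs);

	while (cur > floor) {
		/* CAS succeeds only if nobody else changed refs meanwhile. */
		if (atomic_compare_exchange_weak(refs, &cur, cur - 1))
			return true;	/* we dropped one reference */
		/* cur was refreshed by the failed CAS; re-check and retry. */
	}
	return false;	/* caller must take the lock and re-check */
}

int main(void)
{
	atomic_int refs = 5;

	while (put_ref_fast(&refs, 3))
		;
	printf("refs left for the slow path: %d\n", atomic_load(&refs));
	return 0;
}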
3901 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
3902 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
3904 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
3905 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
3906 atomic_dec(&eb->refs); in free_extent_buffer_stale()
3910 static void btree_clear_page_dirty(struct page *page) in btree_clear_page_dirty() argument
3912 ASSERT(PageDirty(page)); in btree_clear_page_dirty()
3913 ASSERT(PageLocked(page)); in btree_clear_page_dirty()
3914 clear_page_dirty_for_io(page); in btree_clear_page_dirty()
3915 xa_lock_irq(&page->mapping->i_pages); in btree_clear_page_dirty()
3916 if (!PageDirty(page)) in btree_clear_page_dirty()
3917 __xa_clear_mark(&page->mapping->i_pages, in btree_clear_page_dirty()
3918 page_index(page), PAGECACHE_TAG_DIRTY); in btree_clear_page_dirty()
3919 xa_unlock_irq(&page->mapping->i_pages); in btree_clear_page_dirty()
3924 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_subpage_extent_buffer_dirty()
3925 struct page *page = eb->pages[0]; in clear_subpage_extent_buffer_dirty() local
3928 /* btree_clear_page_dirty() needs page locked */ in clear_subpage_extent_buffer_dirty()
3929 lock_page(page); in clear_subpage_extent_buffer_dirty()
3930 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start, in clear_subpage_extent_buffer_dirty()
3931 eb->len); in clear_subpage_extent_buffer_dirty()
3933 btree_clear_page_dirty(page); in clear_subpage_extent_buffer_dirty()
3934 unlock_page(page); in clear_subpage_extent_buffer_dirty()
3935 WARN_ON(atomic_read(&eb->refs) == 0); in clear_subpage_extent_buffer_dirty()
3941 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_clear_buffer_dirty()
3944 struct page *page; in btrfs_clear_buffer_dirty() local
3948 if (trans && btrfs_header_generation(eb) != trans->transid) in btrfs_clear_buffer_dirty()
3951 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) in btrfs_clear_buffer_dirty()
3954 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, in btrfs_clear_buffer_dirty()
3955 fs_info->dirty_metadata_batch); in btrfs_clear_buffer_dirty()
3957 if (eb->fs_info->nodesize < PAGE_SIZE) in btrfs_clear_buffer_dirty()
3963 page = eb->pages[i]; in btrfs_clear_buffer_dirty()
3964 if (!PageDirty(page)) in btrfs_clear_buffer_dirty()
3966 lock_page(page); in btrfs_clear_buffer_dirty()
3967 btree_clear_page_dirty(page); in btrfs_clear_buffer_dirty()
3968 unlock_page(page); in btrfs_clear_buffer_dirty()
3970 WARN_ON(atomic_read(&eb->refs) == 0); in btrfs_clear_buffer_dirty()
3981 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
3984 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
3985 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
3988 bool subpage = eb->fs_info->nodesize < PAGE_SIZE; in set_extent_buffer_dirty()
3992 * same page, and in clear_subpage_extent_buffer_dirty() we in set_extent_buffer_dirty()
3993 * have to clear page dirty without subpage lock held. in set_extent_buffer_dirty()
3994		 * This can cause a race where our page's dirty bit gets cleared after in set_extent_buffer_dirty()
3998 * its page for other reasons, we can use page lock to prevent in set_extent_buffer_dirty()
4002 lock_page(eb->pages[0]); in set_extent_buffer_dirty()
4004 btrfs_page_set_dirty(eb->fs_info, eb->pages[i], in set_extent_buffer_dirty()
4005 eb->start, eb->len); in set_extent_buffer_dirty()
4007 unlock_page(eb->pages[0]); in set_extent_buffer_dirty()
4008 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, in set_extent_buffer_dirty()
4009 eb->len, in set_extent_buffer_dirty()
4010 eb->fs_info->dirty_metadata_batch); in set_extent_buffer_dirty()
4014 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
4020 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_extent_buffer_uptodate()
4021 struct page *page; in clear_extent_buffer_uptodate() local
4025 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
4028 page = eb->pages[i]; in clear_extent_buffer_uptodate()
4029 if (!page) in clear_extent_buffer_uptodate()
4036 if (fs_info->nodesize >= PAGE_SIZE) in clear_extent_buffer_uptodate()
4037 ClearPageUptodate(page); in clear_extent_buffer_uptodate()
4039 btrfs_subpage_clear_uptodate(fs_info, page, eb->start, in clear_extent_buffer_uptodate()
4040 eb->len); in clear_extent_buffer_uptodate()
4046 struct btrfs_fs_info *fs_info = eb->fs_info; in set_extent_buffer_uptodate()
4047 struct page *page; in set_extent_buffer_uptodate() local
4051 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
4054 page = eb->pages[i]; in set_extent_buffer_uptodate()
4060 if (fs_info->nodesize >= PAGE_SIZE) in set_extent_buffer_uptodate()
4061 SetPageUptodate(page); in set_extent_buffer_uptodate()
4063 btrfs_subpage_set_uptodate(fs_info, page, eb->start, in set_extent_buffer_uptodate()
4064 eb->len); in set_extent_buffer_uptodate()
4070 struct extent_buffer *eb = bbio->private; in extent_buffer_read_end_io()
4071 struct btrfs_fs_info *fs_info = eb->fs_info; in extent_buffer_read_end_io()
4072 bool uptodate = !bbio->bio.bi_status; in extent_buffer_read_end_io()
4077 eb->read_mirror = bbio->mirror_num; in extent_buffer_read_end_io()
4080 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) in extent_buffer_read_end_io()
4087 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in extent_buffer_read_end_io()
4090 bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { in extent_buffer_read_end_io()
4091 u64 start = eb->start + bio_offset; in extent_buffer_read_end_io()
4092 struct page *page = bvec->bv_page; in extent_buffer_read_end_io() local
4093 u32 len = bvec->bv_len; in extent_buffer_read_end_io()
4096 btrfs_page_set_uptodate(fs_info, page, start, len); in extent_buffer_read_end_io()
4098 btrfs_page_clear_uptodate(fs_info, page, start, len); in extent_buffer_read_end_io()
4103 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in extent_buffer_read_end_io()
4105 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in extent_buffer_read_end_io()
4108 bio_put(&bbio->bio); in extent_buffer_read_end_io()
4117 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
4125 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages()
4126 return -EIO; in read_extent_buffer_pages()
4129 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) in read_extent_buffer_pages()
4138 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) { in read_extent_buffer_pages()
4139 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in read_extent_buffer_pages()
4141 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in read_extent_buffer_pages()
4145 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
4146 eb->read_mirror = 0; in read_extent_buffer_pages()
4148 atomic_inc(&eb->refs); in read_extent_buffer_pages()
4151 REQ_OP_READ | REQ_META, eb->fs_info, in read_extent_buffer_pages()
4153 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in read_extent_buffer_pages()
4154 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in read_extent_buffer_pages()
4155 bbio->file_offset = eb->start; in read_extent_buffer_pages()
4156 memcpy(&bbio->parent_check, check, sizeof(*check)); in read_extent_buffer_pages()
4157 if (eb->fs_info->nodesize < PAGE_SIZE) { in read_extent_buffer_pages()
4158 __bio_add_page(&bbio->bio, eb->pages[0], eb->len, in read_extent_buffer_pages()
4159 eb->start - page_offset(eb->pages[0])); in read_extent_buffer_pages()
4162 __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0); in read_extent_buffer_pages()
4168 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); in read_extent_buffer_pages()
4169 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
4170 return -EIO; in read_extent_buffer_pages()
4179 btrfs_warn(eb->fs_info, in report_eb_range()
4181 eb->start, eb->len, start, len); in report_eb_range()
4190 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
4197 unsigned long offset; in check_eb_range() local
4199 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
4200 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
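check_eb_range() rejects any range whose start + len either overflows or lands past eb->len before the copy helpers below touch the buffer. A tiny sketch of the same guard using __builtin_add_overflow(), which the kernel's check_add_overflow() is built on for GCC/Clang; eb_len here is just an example nodesize.

#include <stdbool.h>
#include <stdio.h>

/* Reject ranges that overflow or run past the end of the buffer. */
static bool range_ok(unsigned long start, unsigned long len,
		     unsigned long eb_len)
{
	unsigned long end;

	if (__builtin_add_overflow(start, len, &end) || end > eb_len)
		return false;
	return true;
}

int main(void)
{
	const unsigned long eb_len = 16384;	/* hypothetical nodesize */

	printf("%d\n", range_ok(16000, 384, eb_len));		/* 1: exactly fits */
	printf("%d\n", range_ok(16000, 385, eb_len));		/* 0: past the end */
	printf("%d\n", range_ok(1, (unsigned long)-1, eb_len));	/* 0: overflow */
	return 0;
}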
4210 size_t offset; in read_extent_buffer() local
4211 struct page *page; in read_extent_buffer() local
4225 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer()
4228 page = eb->pages[i]; in read_extent_buffer()
4230 cur = min(len, (PAGE_SIZE - offset)); in read_extent_buffer()
4231 kaddr = page_address(page); in read_extent_buffer()
4232 memcpy(dst, kaddr + offset, cur); in read_extent_buffer()
4235 len -= cur; in read_extent_buffer()
4236 offset = 0; in read_extent_buffer()
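read_extent_buffer() and the user-copy/memcmp/write variants further down all share the loop above: copy min(len, PAGE_SIZE - offset) bytes from the current page, then reset offset to 0 and move to the next page. A minimal userspace sketch of that cross-page walk over plain static buffers instead of struct page; it ignores the subpage shift that get_eb_offset_in_page() adds, and read_split_buffer() is a hypothetical name.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

/*
 * Copy @len bytes starting at byte @start of a buffer that is split
 * across an array of PAGE_SIZE chunks, the way read_extent_buffer()
 * walks eb->pages[].
 */
static void read_split_buffer(char *dst, char **pages, size_t start, size_t len)
{
	size_t i = start / PAGE_SIZE;
	size_t offset = start % PAGE_SIZE;	/* offset within the first page */

	while (len > 0) {
		size_t cur = len < PAGE_SIZE - offset ? len : PAGE_SIZE - offset;

		memcpy(dst, pages[i] + offset, cur);
		dst += cur;
		len -= cur;
		offset = 0;	/* every later page is read from its start */
		i++;
	}
}

int main(void)
{
	static char page0[PAGE_SIZE], page1[PAGE_SIZE];
	char *pages[] = { page0, page1 };
	char out[16];

	memset(page0, 'A', PAGE_SIZE);
	memset(page1, 'B', PAGE_SIZE);

	/* A copy that straddles the page boundary: 8 x 'A', then 8 x 'B'. */
	read_split_buffer(out, pages, PAGE_SIZE - 8, sizeof(out));
	printf("%.16s\n", out);
	return 0;
}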
4246 size_t offset; in read_extent_buffer_to_user_nofault() local
4247 struct page *page; in read_extent_buffer_to_user_nofault() local
4253 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
4254 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
4256 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer_to_user_nofault()
4259 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
4261 cur = min(len, (PAGE_SIZE - offset)); in read_extent_buffer_to_user_nofault()
4262 kaddr = page_address(page); in read_extent_buffer_to_user_nofault()
4263 if (copy_to_user_nofault(dst, kaddr + offset, cur)) { in read_extent_buffer_to_user_nofault()
4264 ret = -EFAULT; in read_extent_buffer_to_user_nofault()
4269 len -= cur; in read_extent_buffer_to_user_nofault()
4270 offset = 0; in read_extent_buffer_to_user_nofault()
4281 size_t offset; in memcmp_extent_buffer() local
4282 struct page *page; in memcmp_extent_buffer() local
4289 return -EINVAL; in memcmp_extent_buffer()
4291 offset = get_eb_offset_in_page(eb, start); in memcmp_extent_buffer()
4294 page = eb->pages[i]; in memcmp_extent_buffer()
4296 cur = min(len, (PAGE_SIZE - offset)); in memcmp_extent_buffer()
4298 kaddr = page_address(page); in memcmp_extent_buffer()
4299 ret = memcmp(ptr, kaddr + offset, cur); in memcmp_extent_buffer()
4304 len -= cur; in memcmp_extent_buffer()
4305 offset = 0; in memcmp_extent_buffer()
4314 * For the regular sector size == PAGE_SIZE case, check if @page is uptodate.
4318 struct page *page) in assert_eb_page_uptodate() argument
4320 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_page_uptodate()
4323 * If we are using the commit root we could potentially clear a page in assert_eb_page_uptodate()
4325 * looked up. We don't want to complain in this case, as the page was in assert_eb_page_uptodate()
4330 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in assert_eb_page_uptodate()
4333 if (fs_info->nodesize < PAGE_SIZE) { in assert_eb_page_uptodate()
4334 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page, in assert_eb_page_uptodate()
4335 eb->start, eb->len))) in assert_eb_page_uptodate()
4336 btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len); in assert_eb_page_uptodate()
4338 WARN_ON(!PageUptodate(page)); in assert_eb_page_uptodate()
4347 size_t offset; in __write_extent_buffer() local
4348 struct page *page; in __write_extent_buffer() local
4353 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __write_extent_buffer()
4355 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)); in __write_extent_buffer()
4360 offset = get_eb_offset_in_page(eb, start); in __write_extent_buffer()
4363 page = eb->pages[i]; in __write_extent_buffer()
4365 assert_eb_page_uptodate(eb, page); in __write_extent_buffer()
4367 cur = min(len, PAGE_SIZE - offset); in __write_extent_buffer()
4368 kaddr = page_address(page); in __write_extent_buffer()
4370 memmove(kaddr + offset, src, cur); in __write_extent_buffer()
4372 memcpy(kaddr + offset, src, cur); in __write_extent_buffer()
4375 len -= cur; in __write_extent_buffer()
4376 offset = 0; in __write_extent_buffer()
4394 unsigned int offset = get_eb_offset_in_page(eb, cur); in memset_extent_buffer() local
4395 unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset); in memset_extent_buffer()
4396 struct page *page = eb->pages[index]; in memset_extent_buffer() local
4398 assert_eb_page_uptodate(eb, page); in memset_extent_buffer()
4399 memset(page_address(page) + offset, c, cur_len); in memset_extent_buffer()
4418 ASSERT(dst->len == src->len); in copy_extent_buffer_full()
4420 while (cur < src->len) { in copy_extent_buffer_full()
4422 unsigned long offset = get_eb_offset_in_page(src, cur); in copy_extent_buffer_full() local
4423 unsigned long cur_len = min(src->len, PAGE_SIZE - offset); in copy_extent_buffer_full()
4424 void *addr = page_address(src->pages[index]) + offset; in copy_extent_buffer_full()
4437 u64 dst_len = dst->len; in copy_extent_buffer()
4439 size_t offset; in copy_extent_buffer() local
4440 struct page *page; in copy_extent_buffer() local
4448 WARN_ON(src->len != dst_len); in copy_extent_buffer()
4450 offset = get_eb_offset_in_page(dst, dst_offset); in copy_extent_buffer()
4453 page = dst->pages[i]; in copy_extent_buffer()
4454 assert_eb_page_uptodate(dst, page); in copy_extent_buffer()
4456 cur = min(len, (unsigned long)(PAGE_SIZE - offset)); in copy_extent_buffer()
4458 kaddr = page_address(page); in copy_extent_buffer()
4459 read_extent_buffer(src, kaddr + offset, src_offset, cur); in copy_extent_buffer()
4462 len -= cur; in copy_extent_buffer()
4463 offset = 0; in copy_extent_buffer()
4469 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
4472 * @start: offset of the bitmap item in the extent buffer
4474 * @page_index: return index of the page in the extent buffer that contains the
4476 * @page_offset: return offset into the page given by page_index
4487 size_t offset; in eb_bitmap_offset() local
4490 * The byte we want is the offset of the extent buffer + the offset of in eb_bitmap_offset()
4491 * the bitmap item in the extent buffer + the offset of the byte in the in eb_bitmap_offset()
4494 offset = start + offset_in_page(eb->start) + byte_offset; in eb_bitmap_offset()
4496 *page_index = offset >> PAGE_SHIFT; in eb_bitmap_offset()
4497 *page_offset = offset_in_page(offset); in eb_bitmap_offset()
4504 * @start: offset of the bitmap item in the extent buffer
4511 struct page *page; in extent_buffer_test_bit() local
4513 size_t offset; in extent_buffer_test_bit() local
4515 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
4516 page = eb->pages[i]; in extent_buffer_test_bit()
4517 assert_eb_page_uptodate(eb, page); in extent_buffer_test_bit()
4518 kaddr = page_address(page); in extent_buffer_test_bit()
4519 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
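eb_bitmap_offset() reduces a bit number to a page index plus a byte offset (start + offset_in_page(eb->start) + nr / 8), and extent_buffer_test_bit() then shifts that byte right by nr & 7. The sketch below repeats that arithmetic standalone, assuming 4 KiB pages; bitmap_offset() and test_bit_in_byte() are illustrative names, not kernel helpers. The offset_in_page(eb_start) term is what makes the math work for subpage extent buffers that do not start at a page boundary.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096u
#define PAGE_SHIFT		12
#define BITS_PER_BYTE		8
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)
#define offset_in_page(x)	((size_t)((x) & (PAGE_SIZE - 1)))

/*
 * Locate the page and the byte within that page which hold bit @nr of a
 * bitmap item that starts @start bytes into an extent buffer whose first
 * byte sits at logical address @eb_start.
 */
static void bitmap_offset(uint64_t eb_start, unsigned long start,
			  unsigned long nr, unsigned long *page_index,
			  size_t *page_offset)
{
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset = start + offset_in_page(eb_start) + byte_offset;

	*page_index = offset >> PAGE_SHIFT;
	*page_offset = offset_in_page(offset);
}

/* Bit extraction as in extent_buffer_test_bit(). */
static int test_bit_in_byte(uint8_t byte, unsigned long nr)
{
	return 1u & (byte >> (nr & (BITS_PER_BYTE - 1)));
}

int main(void)
{
	unsigned long page_index;
	size_t page_offset;

	/* Bit 70000 of a bitmap starting 100 bytes into the buffer. */
	bitmap_offset(0, 100, 70000, &page_index, &page_offset);
	printf("page %lu, byte %zu, bit %lu\n",
	       page_index, page_offset, 70000ul % BITS_PER_BYTE);

	printf("bit set? %d\n", test_bit_in_byte(0x10, 4));	/* prints 1 */
	return 0;
}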
4528 return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr); in extent_buffer_get_byte()
4535 * @start: offset of the bitmap item in the extent buffer
4543 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_set()
4559 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_set()
4571 * @start: offset of the bitmap item in the extent buffer
4580 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_clear()
4596 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_clear()
4605 unsigned long distance = (src > dst) ? src - dst : dst - src; in areas_overlap()
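areas_overlap() is the entire test that lets memcpy_extent_buffer() choose between memcpy and memmove: two ranges of length len overlap exactly when the distance between their start offsets is smaller than len. The same check as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* |src - dst| < len means the source and destination ranges overlap. */
static bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

int main(void)
{
	printf("%d\n", areas_overlap(0, 100, 50));	/* 0: disjoint, memcpy is fine */
	printf("%d\n", areas_overlap(0, 100, 150));	/* 1: overlap, need memmove */
	return 0;
}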
4623 unsigned long cur_len = min(src_offset + len - cur_src, in memcpy_extent_buffer()
4624 PAGE_SIZE - pg_off); in memcpy_extent_buffer()
4625 void *src_addr = page_address(dst->pages[pg_index]) + pg_off; in memcpy_extent_buffer()
4639 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
4640 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
4667 src_addr = page_address(dst->pages[src_i]) + src_off_in_page - in memmove_extent_buffer()
4669 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1, in memmove_extent_buffer()
4672 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur, in memmove_extent_buffer()
4675 dst_end -= cur; in memmove_extent_buffer()
4676 src_end -= cur; in memmove_extent_buffer()
4677 len -= cur; in memmove_extent_buffer()
4683 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr) in get_next_extent_buffer() argument
4687 u64 page_start = page_offset(page); in get_next_extent_buffer()
4691 lockdep_assert_held(&fs_info->buffer_lock); in get_next_extent_buffer()
4697 ret = radix_tree_gang_lookup(&fs_info->buffer_radix, in get_next_extent_buffer()
4698 (void **)gang, cur >> fs_info->sectorsize_bits, in get_next_extent_buffer()
4700 PAGE_SIZE / fs_info->nodesize)); in get_next_extent_buffer()
4704 /* Already beyond page end */ in get_next_extent_buffer()
4705 if (gang[i]->start >= page_start + PAGE_SIZE) in get_next_extent_buffer()
4708 if (gang[i]->start >= bytenr) { in get_next_extent_buffer()
4713 cur = gang[ret - 1]->start + gang[ret - 1]->len; in get_next_extent_buffer()
4719 static int try_release_subpage_extent_buffer(struct page *page) in try_release_subpage_extent_buffer() argument
4721 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); in try_release_subpage_extent_buffer()
4722 u64 cur = page_offset(page); in try_release_subpage_extent_buffer()
4723 const u64 end = page_offset(page) + PAGE_SIZE; in try_release_subpage_extent_buffer()
4730 * Unlike try_release_extent_buffer() which uses page->private in try_release_subpage_extent_buffer()
4737 spin_lock(&fs_info->buffer_lock); in try_release_subpage_extent_buffer()
4738 eb = get_next_extent_buffer(fs_info, page, cur); in try_release_subpage_extent_buffer()
4740 /* No more eb in the page range after or at cur */ in try_release_subpage_extent_buffer()
4741 spin_unlock(&fs_info->buffer_lock); in try_release_subpage_extent_buffer()
4744 cur = eb->start + eb->len; in try_release_subpage_extent_buffer()
4750 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4751 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
4752 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4753 spin_unlock(&fs_info->buffer_lock); in try_release_subpage_extent_buffer()
4756 spin_unlock(&fs_info->buffer_lock); in try_release_subpage_extent_buffer()
4763 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
4764 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4770 * check the page private at the end. And in try_release_subpage_extent_buffer()
4776	 * Finally, check whether we have cleared the page private, as if we have in try_release_subpage_extent_buffer()
4777 * released all ebs in the page, the page private should be cleared now. in try_release_subpage_extent_buffer()
4779 spin_lock(&page->mapping->private_lock); in try_release_subpage_extent_buffer()
4780 if (!PagePrivate(page)) in try_release_subpage_extent_buffer()
4784 spin_unlock(&page->mapping->private_lock); in try_release_subpage_extent_buffer()
4789 int try_release_extent_buffer(struct page *page) in try_release_extent_buffer() argument
4793 if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE) in try_release_extent_buffer()
4794 return try_release_subpage_extent_buffer(page); in try_release_extent_buffer()
4797 * We need to make sure nobody is changing page->private, as we rely on in try_release_extent_buffer()
4798 * page->private as the pointer to extent buffer. in try_release_extent_buffer()
4800 spin_lock(&page->mapping->private_lock); in try_release_extent_buffer()
4801 if (!PagePrivate(page)) { in try_release_extent_buffer()
4802 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
4806 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
4812 * this page. in try_release_extent_buffer()
4814 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
4815 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
4816 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4817 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
4820 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
4824 * so just return, this page will likely be freed soon anyway. in try_release_extent_buffer()
4826 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
4827 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4835 * btrfs_readahead_tree_block - attempt to readahead a child block
4874 * btrfs_readahead_node_child - readahead a node's child block
4883 btrfs_readahead_tree_block(node->fs_info, in btrfs_readahead_node_child()
4887 btrfs_header_level(node) - 1); in btrfs_readahead_node_child()