// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This makes metadata handling easier, as a 64K page ensures that all
 *   valid nodesizes fit inside one page, so we don't need to handle cases
 *   where a tree block crosses several pages.
 *
 * - Only metadata read-write is supported for now
 *   The data read-write part is under development.
 *
 * - Metadata can't cross a 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, so only
 *   ancient filesystems could have such a problem. For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   That means reading one tree block only triggers a read for the needed
 *   range; other unrelated ranges in the same page are not touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still done for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K         64K
 *   |/////////|           |///////////|
 *        \- Tree block A       \- Tree block B
 *
 *   Even if we only want to write back tree block A, tree block B will
 *   also be written back if it is dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data use a new structure, btrfs_subpage, to record
 *   the status of each sector inside a page. This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on
 *   page locking anymore, or we would have greatly reduced concurrency or
 *   even deadlocks (holding one tree lock while trying to lock another
 *   tree block in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking
 *   only. This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
{
        if (fs_info->sectorsize >= PAGE_SIZE)
                return false;

        /*
         * Only data pages (either through DIO or compression) can have no
         * mapping. And if page->mapping->host is a data inode, it's a
         * subpage page, as we have already ruled out the
         * sectorsize >= PAGE_SIZE case.
         */
        if (!page->mapping || !page->mapping->host ||
            is_data_inode(page->mapping->host))
                return true;

        /*
         * Now the only remaining case is metadata, which goes through the
         * subpage routine only if nodesize < PAGE_SIZE.
         */
        if (fs_info->nodesize < PAGE_SIZE)
                return true;
        return false;
}
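
/*
 * A worked example, with assumed values: on a 64K page system with a 4K
 * sectorsize filesystem, any data page is subpage (4K < 64K), while a
 * metadata page is subpage only when nodesize < PAGE_SIZE; the default
 * 16K nodesize qualifies, a 64K nodesize does not.
 */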

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
        unsigned int cur = 0;
        unsigned int nr_bits;

        ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

        nr_bits = PAGE_SIZE / sectorsize;
        subpage_info->bitmap_nr_bits = nr_bits;

        subpage_info->uptodate_offset = cur;
        cur += nr_bits;

        subpage_info->dirty_offset = cur;
        cur += nr_bits;

        subpage_info->writeback_offset = cur;
        cur += nr_bits;

        subpage_info->ordered_offset = cur;
        cur += nr_bits;

        subpage_info->checked_offset = cur;
        cur += nr_bits;

        subpage_info->total_nr_bits = cur;
}
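
/*
 * A sketch of the resulting layout, assuming a 64K page and a 4K
 * sectorsize (so bitmap_nr_bits = 16):
 *
 *   uptodate:  bits  0..15
 *   dirty:     bits 16..31
 *   writeback: bits 32..47
 *   ordered:   bits 48..63
 *   checked:   bits 64..79
 *
 * for a total_nr_bits of 80, all packed into one subpage->bitmaps array.
 */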

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
                         struct page *page, enum btrfs_subpage_type type)
{
        struct btrfs_subpage *subpage;

        /*
         * We have cases like a dummy extent buffer page, which is not
         * mapped and doesn't need to be locked.
         */
        if (page->mapping)
                ASSERT(PageLocked(page));

        /* Either not subpage, or the page already has private attached */
        if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
                return 0;

        subpage = btrfs_alloc_subpage(fs_info, type);
        if (IS_ERR(subpage))
                return PTR_ERR(subpage);

        attach_page_private(page, subpage);
        return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
                          struct page *page)
{
        struct btrfs_subpage *subpage;

        /* Either not subpage, or already detached */
        if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
                return;

        subpage = detach_page_private(page);
        ASSERT(subpage);
        btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
                                          enum btrfs_subpage_type type)
{
        struct btrfs_subpage *ret;
        unsigned int real_size;

        ASSERT(fs_info->sectorsize < PAGE_SIZE);

        real_size = struct_size(ret, bitmaps,
                        BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
        ret = kzalloc(real_size, GFP_NOFS);
        if (!ret)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&ret->lock);
        if (type == BTRFS_SUBPAGE_METADATA) {
                atomic_set(&ret->eb_refs, 0);
        } else {
                atomic_set(&ret->readers, 0);
                atomic_set(&ret->writers, 0);
        }
        return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
        kfree(subpage);
}

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the freeing
 * of the last eb in the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're
 * still allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
                            struct page *page)
{
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, page))
                return;

        ASSERT(PagePrivate(page) && page->mapping);
        lockdep_assert_held(&page->mapping->private_lock);

        subpage = (struct btrfs_subpage *)page->private;
        atomic_inc(&subpage->eb_refs);
}

void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
                            struct page *page)
{
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, page))
                return;

        ASSERT(PagePrivate(page) && page->mapping);
        lockdep_assert_held(&page->mapping->private_lock);

        subpage = (struct btrfs_subpage *)page->private;
        ASSERT(atomic_read(&subpage->eb_refs));
        atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len)
{
        /* Basic checks */
        ASSERT(PagePrivate(page) && page->private);
        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(len, fs_info->sectorsize));
        /*
         * The range check only works for mapped pages; we can still have
         * unmapped pages like dummy extent buffer pages.
         */
        if (page->mapping)
                ASSERT(page_offset(page) <= start &&
                       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
                                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const int nbits = len >> fs_info->sectorsize_bits;

        btrfs_subpage_assert(fs_info, page, start, len);

        atomic_add(nbits, &subpage->readers);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
                              struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const int nbits = len >> fs_info->sectorsize_bits;
        bool is_data;
        bool last;

        btrfs_subpage_assert(fs_info, page, start, len);
        is_data = is_data_inode(page->mapping->host);
        ASSERT(atomic_read(&subpage->readers) >= nbits);
        last = atomic_sub_and_test(nbits, &subpage->readers);

        /*
         * For data we need to unlock the page if the last read has finished.
         *
         * Please don't replace @last with an atomic_sub_and_test() call
         * inside the if () condition, as we want atomic_sub_and_test() to
         * always be executed.
         */
        if (is_data && last)
                unlock_page(page);
}

static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
{
        u64 orig_start = *start;
        u32 orig_len = *len;

        *start = max_t(u64, page_offset(page), orig_start);
        /*
         * For certain call sites like btrfs_drop_pages(), we may have pages
         * beyond the target range. In that case, just set @len to 0; the
         * subpage helpers can handle @len == 0 without any problem.
         */
        if (page_offset(page) >= orig_start + orig_len)
                *len = 0;
        else
                *len = min_t(u64, page_offset(page) + PAGE_SIZE,
                             orig_start + orig_len) - *start;
}
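
/*
 * A worked example, with assumed values: for a 64K page at file offset
 * 64K, an input range of [60K, 72K) clamps to start = 64K, len = 8K.
 * An input range of [0K, 32K), which ends before this page, results in
 * @len being set to 0.
 */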

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
                                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const int nbits = (len >> fs_info->sectorsize_bits);
        int ret;

        btrfs_subpage_assert(fs_info, page, start, len);

        ASSERT(atomic_read(&subpage->readers) == 0);
        ret = atomic_add_return(nbits, &subpage->writers);
        ASSERT(ret == nbits);
}

bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
                                       struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const int nbits = (len >> fs_info->sectorsize_bits);

        btrfs_subpage_assert(fs_info, page, start, len);

        /*
         * We have call sites passing @locked_page into
         * extent_clear_unlock_delalloc() for the compression path.
         *
         * Such a @locked_page is locked by plain lock_page(), thus its
         * subpage::writers is 0. Handle it in a special way.
         */
        if (atomic_read(&subpage->writers) == 0)
                return true;

        ASSERT(atomic_read(&subpage->writers) >= nbits);
        return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len)
{
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
                lock_page(page);
                return 0;
        }
        lock_page(page);
        if (!PagePrivate(page) || !page->private) {
                unlock_page(page);
                return -EAGAIN;
        }
        btrfs_subpage_clamp_range(page, &start, &len);
        btrfs_subpage_start_writer(fs_info, page, start, len);
        return 0;
}

void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
                                struct page *page, u64 start, u32 len)
{
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
                return unlock_page(page);
        btrfs_subpage_clamp_range(page, &start, &len);
        if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
                unlock_page(page);
}

#define subpage_calc_start_bit(fs_info, page, name, start, len)         \
({                                                                      \
        unsigned int start_bit;                                         \
                                                                        \
        btrfs_subpage_assert(fs_info, page, start, len);                \
        start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;  \
        start_bit += fs_info->subpage_info->name##_offset;              \
        start_bit;                                                      \
})
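
/*
 * A worked example, with assumed values: for a 64K page, a 4K sectorsize
 * and the bitmap layout from btrfs_init_subpage_info(), a @start that is
 * 8K into the page gives offset_in_page(start) >> sectorsize_bits = 2,
 * so the "dirty" range would start at bit 16 + 2 = 18 of
 * subpage->bitmaps.
 */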

#define subpage_test_bitmap_all_set(fs_info, subpage, name)             \
        bitmap_test_range_all_set(subpage->bitmaps,                     \
                        fs_info->subpage_info->name##_offset,           \
                        fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)            \
        bitmap_test_range_all_zero(subpage->bitmaps,                    \
                        fs_info->subpage_info->name##_offset,           \
                        fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
                                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        uptodate, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
                SetPageUptodate(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
                                  struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        uptodate, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        ClearPageUptodate(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
                             struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        dirty, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        spin_unlock_irqrestore(&subpage->lock, flags);
        set_page_dirty(page);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if the dirty bitmap became all zero after clearing the given
 * range.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
                                        struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        dirty, start, len);
        unsigned long flags;
        bool last = false;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
                last = true;
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
                               struct page *page, u64 start, u32 len)
{
        bool last;

        last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
        if (last)
                clear_page_dirty_for_io(page);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        writeback, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        set_page_writeback(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
                                   struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        writeback, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
                ASSERT(PageWriteback(page));
                end_page_writeback(page);
        }
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
                               struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        ordered, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        SetPageOrdered(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        ordered, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
                ClearPageOrdered(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
                               struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        checked, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
                SetPageChecked(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
                                                        checked, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        ClearPageChecked(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page's status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)                           \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,    \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,  \
                                                name, start, len);      \
        unsigned long flags;                                            \
        bool ret;                                                       \
                                                                        \
        spin_lock_irqsave(&subpage->lock, flags);                       \
        ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,    \
                                len >> fs_info->sectorsize_bits);       \
        spin_unlock_irqrestore(&subpage->lock, flags);                  \
        return ret;                                                     \
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that in selftests (extent-io-tests) we can have an empty fs_info
 * passed in. We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,  \
                                 test_page_func)                        \
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,         \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                set_page_func(page);                                    \
                return;                                                 \
        }                                                               \
        btrfs_subpage_set_##name(fs_info, page, start, len);            \
}                                                                       \
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,       \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                clear_page_func(page);                                  \
                return;                                                 \
        }                                                               \
        btrfs_subpage_clear_##name(fs_info, page, start, len);          \
}                                                                       \
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,        \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))     \
                return test_page_func(page);                            \
        return btrfs_subpage_test_##name(fs_info, page, start, len);    \
}                                                                       \
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,   \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                set_page_func(page);                                    \
                return;                                                 \
        }                                                               \
        btrfs_subpage_clamp_range(page, &start, &len);                  \
        btrfs_subpage_set_##name(fs_info, page, start, len);            \
}                                                                       \
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                clear_page_func(page);                                  \
                return;                                                 \
        }                                                               \
        btrfs_subpage_clamp_range(page, &start, &len);                  \
        btrfs_subpage_clear_##name(fs_info, page, start, len);          \
}                                                                       \
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,  \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))     \
                return test_page_func(page);                            \
        btrfs_subpage_clamp_range(page, &start, &len);                  \
        return btrfs_subpage_test_##name(fs_info, page, start, len);    \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
                         PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
                         PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
                         PageWriteback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
                         PageOrdered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
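
/*
 * As a sketch of what the macro generates: IMPLEMENT_BTRFS_PAGE_OPS(dirty,
 * set_page_dirty, clear_page_dirty_for_io, PageDirty) expands into
 * btrfs_page_set_dirty(), btrfs_page_clear_dirty(), btrfs_page_test_dirty()
 * plus the btrfs_page_clamp_* variants, each of which falls back to the
 * plain page flag helper for the non-subpage (or NULL fs_info) case.
 */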

/*
 * Make sure that not only the page dirty bit is cleared, but the subpage
 * dirty bit is cleared as well.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
                                 struct page *page)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

        if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
                return;

        ASSERT(!PageDirty(page));
        if (!btrfs_is_subpage(fs_info, page))
                return;

        ASSERT(PagePrivate(page) && page->private);
        ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, which locks all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
                              u64 start, u32 len)
{
        struct btrfs_subpage *subpage;

        ASSERT(PageLocked(page));
        /* For the non-subpage case, we just unlock the page */
        if (!btrfs_is_subpage(fs_info, page))
                return unlock_page(page);

        ASSERT(PagePrivate(page) && page->private);
        subpage = (struct btrfs_subpage *)page->private;

        /*
         * For the subpage case, there are two types of locked page: with or
         * without a writers count.
         *
         * Since we own the page lock, no one else could touch
         * subpage::writers, and we are safe to do several atomic operations
         * without a spinlock.
         */
        if (atomic_read(&subpage->writers) == 0)
                /* No writers, locked by plain lock_page() */
                return unlock_page(page);

        /* Have writers, use the proper subpage helper to end it */
        btrfs_page_end_writer_lock(fs_info, page, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)            \
{                                                                       \
        const int bitmap_nr_bits = subpage_info->bitmap_nr_bits;        \
                                                                        \
        ASSERT(bitmap_nr_bits < BITS_PER_LONG);                         \
        *dst = bitmap_read(subpage->bitmaps,                            \
                           subpage_info->name##_offset,                 \
                           bitmap_nr_bits);                             \
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
                                      struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
        struct btrfs_subpage *subpage;
        unsigned long uptodate_bitmap;
        unsigned long dirty_bitmap;
        unsigned long writeback_bitmap;
        unsigned long ordered_bitmap;
        unsigned long checked_bitmap;
        unsigned long flags;

        ASSERT(PagePrivate(page) && page->private);
        ASSERT(subpage_info);
        subpage = (struct btrfs_subpage *)page->private;

        spin_lock_irqsave(&subpage->lock, flags);
        GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
        GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
        GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
        GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
        GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
        spin_unlock_irqrestore(&subpage->lock, flags);

        dump_page(page, "btrfs subpage dump");
        btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
                   start, len, page_offset(page),
                   subpage_info->bitmap_nr_bits, &uptodate_bitmap,
                   subpage_info->bitmap_nr_bits, &dirty_bitmap,
                   subpage_info->bitmap_nr_bits, &writeback_bitmap,
                   subpage_info->bitmap_nr_bits, &ordered_bitmap,
                   subpage_info->bitmap_nr_bits, &checked_bitmap);
}