xref: /openbmc/linux/fs/btrfs/subpage.c (revision c992fa1f)
1cac06d84SQu Wenruo // SPDX-License-Identifier: GPL-2.0
2cac06d84SQu Wenruo 
3cac06d84SQu Wenruo #include <linux/slab.h>
4cac06d84SQu Wenruo #include "ctree.h"
5cac06d84SQu Wenruo #include "subpage.h"
63d078efaSQu Wenruo #include "btrfs_inode.h"
7cac06d84SQu Wenruo 
8894d1378SQu Wenruo /*
9894d1378SQu Wenruo  * Subpage (sectorsize < PAGE_SIZE) support overview:
10894d1378SQu Wenruo  *
11894d1378SQu Wenruo  * Limitations:
12894d1378SQu Wenruo  *
13894d1378SQu Wenruo  * - Only support 64K page size for now
14894d1378SQu Wenruo  *   This is to make metadata handling easier, as a 64K page ensures that
15894d1378SQu Wenruo  *   any nodesize fits inside one page, thus we don't need to handle
16894d1378SQu Wenruo  *   the case where a tree block crosses several pages.
17894d1378SQu Wenruo  *
18894d1378SQu Wenruo  * - Only metadata read-write for now
19894d1378SQu Wenruo  *   The data read-write part is in development.
20894d1378SQu Wenruo  *
21894d1378SQu Wenruo  * - Metadata can't cross 64K page boundary
22894d1378SQu Wenruo  *   btrfs-progs and the kernel have enforced this for a while, thus only
23894d1378SQu Wenruo  *   ancient filesystems could have such a problem.  For that case, do a
24894d1378SQu Wenruo  *   graceful rejection.
25894d1378SQu Wenruo  *
26894d1378SQu Wenruo  * Special behavior:
27894d1378SQu Wenruo  *
28894d1378SQu Wenruo  * - Metadata
29894d1378SQu Wenruo  *   Metadata read is fully supported.
30894d1378SQu Wenruo  *   That means reading one tree block only triggers the read for the needed
31894d1378SQu Wenruo  *   range; other unrelated ranges in the same page are not touched.
32894d1378SQu Wenruo  *
33894d1378SQu Wenruo  *   Metadata write support is partial.
34894d1378SQu Wenruo  *   The writeback is still done for the full page, but we will only submit
35894d1378SQu Wenruo  *   the dirty extent buffers in that page.
36894d1378SQu Wenruo  *
37894d1378SQu Wenruo  *   This means, if we have a metadata page like this:
38894d1378SQu Wenruo  *
39894d1378SQu Wenruo  *   Page offset
40894d1378SQu Wenruo  *   0         16K         32K         48K        64K
41894d1378SQu Wenruo  *   |/////////|           |///////////|
42894d1378SQu Wenruo  *        \- Tree block A        \- Tree block B
43894d1378SQu Wenruo  *
44894d1378SQu Wenruo  *   Even if we just want to write back tree block A, we will also write back
45894d1378SQu Wenruo  *   tree block B if it's also dirty.
46894d1378SQu Wenruo  *
47894d1378SQu Wenruo  *   This may cause extra metadata writeback, which results in more COW.
48894d1378SQu Wenruo  *
49894d1378SQu Wenruo  * Implementation:
50894d1378SQu Wenruo  *
51894d1378SQu Wenruo  * - Common
52894d1378SQu Wenruo  *   Both metadata and data will use a new structure, btrfs_subpage, to
53894d1378SQu Wenruo  *   record the status of each sector inside a page.  This provides the extra
54894d1378SQu Wenruo  *   granularity needed.
55894d1378SQu Wenruo  *
56894d1378SQu Wenruo  * - Metadata
57894d1378SQu Wenruo  *   Since we have multiple tree blocks inside one page, we can't rely on page
58894d1378SQu Wenruo  *   locking anymore, or we will have greatly reduced concurrency or even
59894d1378SQu Wenruo  *   deadlocks (holding one tree lock while trying to lock another tree block in
60894d1378SQu Wenruo  *   the same page).
61894d1378SQu Wenruo  *
62894d1378SQu Wenruo  *   Thus for metadata locking, subpage support relies on io_tree locking only.
63894d1378SQu Wenruo  *   This means a slightly higher tree locking latency.
64894d1378SQu Wenruo  */
65894d1378SQu Wenruo 
668481dd80SQu Wenruo void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
678481dd80SQu Wenruo {
688481dd80SQu Wenruo 	unsigned int cur = 0;
698481dd80SQu Wenruo 	unsigned int nr_bits;
708481dd80SQu Wenruo 
718481dd80SQu Wenruo 	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));
728481dd80SQu Wenruo 
738481dd80SQu Wenruo 	nr_bits = PAGE_SIZE / sectorsize;
748481dd80SQu Wenruo 	subpage_info->bitmap_nr_bits = nr_bits;
758481dd80SQu Wenruo 
768481dd80SQu Wenruo 	subpage_info->uptodate_offset = cur;
778481dd80SQu Wenruo 	cur += nr_bits;
788481dd80SQu Wenruo 
798481dd80SQu Wenruo 	subpage_info->error_offset = cur;
808481dd80SQu Wenruo 	cur += nr_bits;
818481dd80SQu Wenruo 
828481dd80SQu Wenruo 	subpage_info->dirty_offset = cur;
838481dd80SQu Wenruo 	cur += nr_bits;
848481dd80SQu Wenruo 
858481dd80SQu Wenruo 	subpage_info->writeback_offset = cur;
868481dd80SQu Wenruo 	cur += nr_bits;
878481dd80SQu Wenruo 
888481dd80SQu Wenruo 	subpage_info->ordered_offset = cur;
898481dd80SQu Wenruo 	cur += nr_bits;
908481dd80SQu Wenruo 
91e4f94347SQu Wenruo 	subpage_info->checked_offset = cur;
92e4f94347SQu Wenruo 	cur += nr_bits;
93e4f94347SQu Wenruo 
948481dd80SQu Wenruo 	subpage_info->total_nr_bits = cur;
958481dd80SQu Wenruo }
968481dd80SQu Wenruo 
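/*
 * Illustrative example (not from the source, derived from the code above):
 * with a 64K page and 4K sectorsize, btrfs_init_subpage_info() produces
 * nr_bits = 64K / 4K = 16 bits per state bitmap, laid out back to back:
 *
 *	uptodate_offset  =  0
 *	error_offset     = 16
 *	dirty_offset     = 32
 *	writeback_offset = 48
 *	ordered_offset   = 64
 *	checked_offset   = 80
 *	total_nr_bits    = 96	(BITS_TO_LONGS(96) == 2 longs on 64-bit)
 *
 * All six bitmaps live in the single subpage->bitmaps[] array, and the
 * *_offset values locate each state's group of bits inside it.
 */
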
97cac06d84SQu Wenruo int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
98cac06d84SQu Wenruo 			 struct page *page, enum btrfs_subpage_type type)
99cac06d84SQu Wenruo {
100651fb419SQu Wenruo 	struct btrfs_subpage *subpage;
101cac06d84SQu Wenruo 
102cac06d84SQu Wenruo 	/*
103cac06d84SQu Wenruo 	 * We have cases like a dummy extent buffer page, which is not mapped
104cac06d84SQu Wenruo 	 * and doesn't need to be locked.
105cac06d84SQu Wenruo 	 */
106cac06d84SQu Wenruo 	if (page->mapping)
107cac06d84SQu Wenruo 		ASSERT(PageLocked(page));
108651fb419SQu Wenruo 
109cac06d84SQu Wenruo 	/* Either not subpage, or the page already has private attached */
110cac06d84SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
111cac06d84SQu Wenruo 		return 0;
112cac06d84SQu Wenruo 
113651fb419SQu Wenruo 	subpage = btrfs_alloc_subpage(fs_info, type);
114651fb419SQu Wenruo 	if (IS_ERR(subpage))
115651fb419SQu Wenruo 		return PTR_ERR(subpage);
116651fb419SQu Wenruo 
117cac06d84SQu Wenruo 	attach_page_private(page, subpage);
118cac06d84SQu Wenruo 	return 0;
119cac06d84SQu Wenruo }
120cac06d84SQu Wenruo 
121cac06d84SQu Wenruo void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
122cac06d84SQu Wenruo 			  struct page *page)
123cac06d84SQu Wenruo {
124cac06d84SQu Wenruo 	struct btrfs_subpage *subpage;
125cac06d84SQu Wenruo 
126cac06d84SQu Wenruo 	/* Either not subpage, or already detached */
127cac06d84SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
128cac06d84SQu Wenruo 		return;
129cac06d84SQu Wenruo 
130cac06d84SQu Wenruo 	subpage = (struct btrfs_subpage *)detach_page_private(page);
131cac06d84SQu Wenruo 	ASSERT(subpage);
132760f991fSQu Wenruo 	btrfs_free_subpage(subpage);
133760f991fSQu Wenruo }
134760f991fSQu Wenruo 
135651fb419SQu Wenruo struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
136760f991fSQu Wenruo 					  enum btrfs_subpage_type type)
137760f991fSQu Wenruo {
138651fb419SQu Wenruo 	struct btrfs_subpage *ret;
13972a69cd0SQu Wenruo 	unsigned int real_size;
140651fb419SQu Wenruo 
141fdf250dbSQu Wenruo 	ASSERT(fs_info->sectorsize < PAGE_SIZE);
142760f991fSQu Wenruo 
14372a69cd0SQu Wenruo 	real_size = struct_size(ret, bitmaps,
14472a69cd0SQu Wenruo 			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
14572a69cd0SQu Wenruo 	ret = kzalloc(real_size, GFP_NOFS);
146651fb419SQu Wenruo 	if (!ret)
147651fb419SQu Wenruo 		return ERR_PTR(-ENOMEM);
148651fb419SQu Wenruo 
149651fb419SQu Wenruo 	spin_lock_init(&ret->lock);
1501e1de387SQu Wenruo 	if (type == BTRFS_SUBPAGE_METADATA) {
151651fb419SQu Wenruo 		atomic_set(&ret->eb_refs, 0);
1521e1de387SQu Wenruo 	} else {
153651fb419SQu Wenruo 		atomic_set(&ret->readers, 0);
154651fb419SQu Wenruo 		atomic_set(&ret->writers, 0);
1551e1de387SQu Wenruo 	}
156651fb419SQu Wenruo 	return ret;
157760f991fSQu Wenruo }
158760f991fSQu Wenruo 
159760f991fSQu Wenruo void btrfs_free_subpage(struct btrfs_subpage *subpage)
160760f991fSQu Wenruo {
161cac06d84SQu Wenruo 	kfree(subpage);
162cac06d84SQu Wenruo }
1638ff8466dSQu Wenruo 
1648ff8466dSQu Wenruo /*
1658ff8466dSQu Wenruo  * Increase the eb_refs of the current subpage.
1668ff8466dSQu Wenruo  *
1678ff8466dSQu Wenruo  * This is important for eb allocation, to prevent a race with the freeing
1688ff8466dSQu Wenruo  * of the last eb in the same page.
1698ff8466dSQu Wenruo  * With eb_refs increased before the eb is inserted into the radix tree,
1708ff8466dSQu Wenruo  * detach_extent_buffer_page() won't detach the page private while we're still
1718ff8466dSQu Wenruo  * allocating the extent buffer.
1728ff8466dSQu Wenruo  */
1738ff8466dSQu Wenruo void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
1748ff8466dSQu Wenruo 			    struct page *page)
1758ff8466dSQu Wenruo {
1768ff8466dSQu Wenruo 	struct btrfs_subpage *subpage;
1778ff8466dSQu Wenruo 
1788ff8466dSQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
1798ff8466dSQu Wenruo 		return;
1808ff8466dSQu Wenruo 
1818ff8466dSQu Wenruo 	ASSERT(PagePrivate(page) && page->mapping);
1828ff8466dSQu Wenruo 	lockdep_assert_held(&page->mapping->private_lock);
1838ff8466dSQu Wenruo 
1848ff8466dSQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
1858ff8466dSQu Wenruo 	atomic_inc(&subpage->eb_refs);
1868ff8466dSQu Wenruo }
1878ff8466dSQu Wenruo 
1888ff8466dSQu Wenruo void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
1898ff8466dSQu Wenruo 			    struct page *page)
1908ff8466dSQu Wenruo {
1918ff8466dSQu Wenruo 	struct btrfs_subpage *subpage;
1928ff8466dSQu Wenruo 
1938ff8466dSQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
1948ff8466dSQu Wenruo 		return;
1958ff8466dSQu Wenruo 
1968ff8466dSQu Wenruo 	ASSERT(PagePrivate(page) && page->mapping);
1978ff8466dSQu Wenruo 	lockdep_assert_held(&page->mapping->private_lock);
1988ff8466dSQu Wenruo 
1998ff8466dSQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
2008ff8466dSQu Wenruo 	ASSERT(atomic_read(&subpage->eb_refs));
2018ff8466dSQu Wenruo 	atomic_dec(&subpage->eb_refs);
2028ff8466dSQu Wenruo }
203a1d767c1SQu Wenruo 
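/*
 * A rough sketch of the ordering btrfs_page_inc_eb_refs() and
 * btrfs_page_dec_eb_refs() protect (simplified, not verbatim from
 * extent_io.c): the eb allocation path attaches the subpage private and
 * calls btrfs_page_inc_eb_refs() under mapping->private_lock before the new
 * eb becomes visible, while detach_extent_buffer_page() only detaches the
 * subpage private once, under the same lock, no eb in the page holds a
 * reference any more.
 */
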
20492082d40SQu Wenruo static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
20592082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
20692082d40SQu Wenruo {
20792082d40SQu Wenruo 	/* Basic checks */
20892082d40SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
20992082d40SQu Wenruo 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
21092082d40SQu Wenruo 	       IS_ALIGNED(len, fs_info->sectorsize));
21192082d40SQu Wenruo 	/*
21292082d40SQu Wenruo 	 * The range check only works for mapped pages; we can still have
21392082d40SQu Wenruo 	 * unmapped pages like dummy extent buffer pages.
21492082d40SQu Wenruo 	 */
21592082d40SQu Wenruo 	if (page->mapping)
21692082d40SQu Wenruo 		ASSERT(page_offset(page) <= start &&
21792082d40SQu Wenruo 		       start + len <= page_offset(page) + PAGE_SIZE);
21892082d40SQu Wenruo }
21992082d40SQu Wenruo 
22092082d40SQu Wenruo void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
22192082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
22292082d40SQu Wenruo {
22392082d40SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
22492082d40SQu Wenruo 	const int nbits = len >> fs_info->sectorsize_bits;
22592082d40SQu Wenruo 
22692082d40SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
22792082d40SQu Wenruo 
2283d078efaSQu Wenruo 	atomic_add(nbits, &subpage->readers);
22992082d40SQu Wenruo }
23092082d40SQu Wenruo 
23192082d40SQu Wenruo void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
23292082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
23392082d40SQu Wenruo {
23492082d40SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
23592082d40SQu Wenruo 	const int nbits = len >> fs_info->sectorsize_bits;
2363d078efaSQu Wenruo 	bool is_data;
2373d078efaSQu Wenruo 	bool last;
23892082d40SQu Wenruo 
23992082d40SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2403d078efaSQu Wenruo 	is_data = is_data_inode(page->mapping->host);
24192082d40SQu Wenruo 	ASSERT(atomic_read(&subpage->readers) >= nbits);
2423d078efaSQu Wenruo 	last = atomic_sub_and_test(nbits, &subpage->readers);
2433d078efaSQu Wenruo 
2443d078efaSQu Wenruo 	/*
2453d078efaSQu Wenruo 	 * For data we need to unlock the page if the last read has finished.
2463d078efaSQu Wenruo 	 *
2473d078efaSQu Wenruo 	 * Please don't replace @last with an atomic_sub_and_test() call
2483d078efaSQu Wenruo 	 * inside the if () condition, as we want the atomic_sub_and_test()
2493d078efaSQu Wenruo 	 * to always be executed.
2503d078efaSQu Wenruo 	 */
2513d078efaSQu Wenruo 	if (is_data && last)
25292082d40SQu Wenruo 		unlock_page(page);
25392082d40SQu Wenruo }
25492082d40SQu Wenruo 
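/*
 * Illustrative example of the reader accounting (values are hypothetical,
 * assuming 4K sectorsize): submitting a read for a 16K range calls
 * btrfs_subpage_start_reader() once, adding 4 to subpage->readers.  Each
 * completed portion calls btrfs_subpage_end_reader() with its own length,
 * and only the call that brings the counter back to 0 unlocks a data page.
 */
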
2551e1de387SQu Wenruo static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
2561e1de387SQu Wenruo {
2571e1de387SQu Wenruo 	u64 orig_start = *start;
2581e1de387SQu Wenruo 	u32 orig_len = *len;
2591e1de387SQu Wenruo 
2601e1de387SQu Wenruo 	*start = max_t(u64, page_offset(page), orig_start);
261e4f94347SQu Wenruo 	/*
262e4f94347SQu Wenruo 	 * For certain call sites like btrfs_drop_pages(), we may have pages
263e4f94347SQu Wenruo 	 * beyond the target range. In that case, just set @len to 0; the subpage
264e4f94347SQu Wenruo 	 * helpers can handle @len == 0 without any problem.
265e4f94347SQu Wenruo 	 */
266e4f94347SQu Wenruo 	if (page_offset(page) >= orig_start + orig_len)
267e4f94347SQu Wenruo 		*len = 0;
268e4f94347SQu Wenruo 	else
2691e1de387SQu Wenruo 		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
2701e1de387SQu Wenruo 			     orig_start + orig_len) - *start;
2711e1de387SQu Wenruo }
2721e1de387SQu Wenruo 
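/*
 * Illustrative examples of the clamping above (64K page size assumed, the
 * page sits at file offset 64K and thus covers [64K, 128K)):
 *
 *	caller range [60K, 72K)   -> clamped to start = 64K, len = 8K
 *	caller range [60K, 132K)  -> clamped to start = 64K, len = 64K
 *	caller range [0K, 32K)    -> page is beyond the range, len = 0
 */
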
2731e1de387SQu Wenruo void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
2741e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
2751e1de387SQu Wenruo {
2761e1de387SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
2771e1de387SQu Wenruo 	const int nbits = (len >> fs_info->sectorsize_bits);
2781e1de387SQu Wenruo 	int ret;
2791e1de387SQu Wenruo 
2801e1de387SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2811e1de387SQu Wenruo 
2821e1de387SQu Wenruo 	ASSERT(atomic_read(&subpage->readers) == 0);
2831e1de387SQu Wenruo 	ret = atomic_add_return(nbits, &subpage->writers);
2841e1de387SQu Wenruo 	ASSERT(ret == nbits);
2851e1de387SQu Wenruo }
2861e1de387SQu Wenruo 
2871e1de387SQu Wenruo bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
2881e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
2891e1de387SQu Wenruo {
2901e1de387SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
2911e1de387SQu Wenruo 	const int nbits = (len >> fs_info->sectorsize_bits);
2921e1de387SQu Wenruo 
2931e1de387SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2941e1de387SQu Wenruo 
295164674a7SQu Wenruo 	/*
296164674a7SQu Wenruo 	 * We have call sites passing @locked_page into
297164674a7SQu Wenruo 	 * extent_clear_unlock_delalloc() for the compression path.
298164674a7SQu Wenruo 	 *
299164674a7SQu Wenruo 	 * Such a @locked_page is locked by plain lock_page(), thus its
300164674a7SQu Wenruo 	 * subpage::writers is 0.  Handle it in a special way.
301164674a7SQu Wenruo 	 */
302164674a7SQu Wenruo 	if (atomic_read(&subpage->writers) == 0)
303164674a7SQu Wenruo 		return true;
304164674a7SQu Wenruo 
3051e1de387SQu Wenruo 	ASSERT(atomic_read(&subpage->writers) >= nbits);
3061e1de387SQu Wenruo 	return atomic_sub_and_test(nbits, &subpage->writers);
3071e1de387SQu Wenruo }
3081e1de387SQu Wenruo 
3091e1de387SQu Wenruo /*
3101e1de387SQu Wenruo  * Lock a page for delalloc page writeback.
3111e1de387SQu Wenruo  *
3121e1de387SQu Wenruo  * Return -EAGAIN if the page is not properly initialized.
3131e1de387SQu Wenruo  * Return 0 with the page locked, and writer counter updated.
3141e1de387SQu Wenruo  *
3151e1de387SQu Wenruo  * Even with 0 returned, the page still needs an extra check to make sure
3161e1de387SQu Wenruo  * it's really the correct page, as the caller is using
3171e1de387SQu Wenruo  * find_get_pages_contig(), which can race with page invalidation.
3181e1de387SQu Wenruo  */
3191e1de387SQu Wenruo int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
3201e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3211e1de387SQu Wenruo {
3221e1de387SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
3231e1de387SQu Wenruo 		lock_page(page);
3241e1de387SQu Wenruo 		return 0;
3251e1de387SQu Wenruo 	}
3261e1de387SQu Wenruo 	lock_page(page);
3271e1de387SQu Wenruo 	if (!PagePrivate(page) || !page->private) {
3281e1de387SQu Wenruo 		unlock_page(page);
3291e1de387SQu Wenruo 		return -EAGAIN;
3301e1de387SQu Wenruo 	}
3311e1de387SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);
3321e1de387SQu Wenruo 	btrfs_subpage_start_writer(fs_info, page, start, len);
3331e1de387SQu Wenruo 	return 0;
3341e1de387SQu Wenruo }
3351e1de387SQu Wenruo 
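/*
 * A rough sketch of the expected caller pattern (simplified and hypothetical,
 * not verbatim from the delalloc locking code):
 *
 *	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
 *	if (ret < 0)
 *		continue;	// -EAGAIN: subpage private already gone
 *	if (!PageDirty(page) || page->mapping != mapping) {
 *		// Raced with invalidation, undo the writer lock.
 *		btrfs_page_end_writer_lock(fs_info, page, start, len);
 *		continue;
 *	}
 *	// Correct page, keep it locked for delalloc writeback.
 */
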
3361e1de387SQu Wenruo void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
3371e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3381e1de387SQu Wenruo {
3391e1de387SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
3401e1de387SQu Wenruo 		return unlock_page(page);
3411e1de387SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);
3421e1de387SQu Wenruo 	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
3431e1de387SQu Wenruo 		unlock_page(page);
3441e1de387SQu Wenruo }
3451e1de387SQu Wenruo 
34672a69cd0SQu Wenruo static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
34772a69cd0SQu Wenruo 				      unsigned int nbits)
348a1d767c1SQu Wenruo {
34972a69cd0SQu Wenruo 	unsigned int found_zero;
350a1d767c1SQu Wenruo 
35172a69cd0SQu Wenruo 	found_zero = find_next_zero_bit(addr, start + nbits, start);
35272a69cd0SQu Wenruo 	if (found_zero == start + nbits)
35372a69cd0SQu Wenruo 		return true;
35472a69cd0SQu Wenruo 	return false;
355a1d767c1SQu Wenruo }
356a1d767c1SQu Wenruo 
35772a69cd0SQu Wenruo static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
35872a69cd0SQu Wenruo 				       unsigned int nbits)
35972a69cd0SQu Wenruo {
36072a69cd0SQu Wenruo 	unsigned int found_set;
36172a69cd0SQu Wenruo 
36272a69cd0SQu Wenruo 	found_set = find_next_bit(addr, start + nbits, start);
36372a69cd0SQu Wenruo 	if (found_set == start + nbits)
36472a69cd0SQu Wenruo 		return true;
36572a69cd0SQu Wenruo 	return false;
36672a69cd0SQu Wenruo }
36772a69cd0SQu Wenruo 
36872a69cd0SQu Wenruo #define subpage_calc_start_bit(fs_info, page, name, start, len)		\
36972a69cd0SQu Wenruo ({									\
37072a69cd0SQu Wenruo 	unsigned int start_bit;						\
37172a69cd0SQu Wenruo 									\
37272a69cd0SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);		\
37372a69cd0SQu Wenruo 	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
37472a69cd0SQu Wenruo 	start_bit += fs_info->subpage_info->name##_offset;		\
37572a69cd0SQu Wenruo 	start_bit;							\
37672a69cd0SQu Wenruo })
37772a69cd0SQu Wenruo 
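/*
 * Worked example for subpage_calc_start_bit() (hypothetical values, assuming
 * the 64K page / 4K sectorsize layout above): for a range starting 8K into
 * the page, offset_in_page(start) >> sectorsize_bits == 2, so the start bit
 * for the "dirty" bitmap is 2 + dirty_offset (32) == 34.
 */
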
37872a69cd0SQu Wenruo #define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
37972a69cd0SQu Wenruo 	bitmap_test_range_all_set(subpage->bitmaps,			\
38072a69cd0SQu Wenruo 			fs_info->subpage_info->name##_offset,		\
38172a69cd0SQu Wenruo 			fs_info->subpage_info->bitmap_nr_bits)
38272a69cd0SQu Wenruo 
38372a69cd0SQu Wenruo #define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
38472a69cd0SQu Wenruo 	bitmap_test_range_all_zero(subpage->bitmaps,			\
38572a69cd0SQu Wenruo 			fs_info->subpage_info->name##_offset,		\
38672a69cd0SQu Wenruo 			fs_info->subpage_info->bitmap_nr_bits)
38772a69cd0SQu Wenruo 
388a1d767c1SQu Wenruo void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
389a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
390a1d767c1SQu Wenruo {
391a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
39272a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
39372a69cd0SQu Wenruo 							uptodate, start, len);
394a1d767c1SQu Wenruo 	unsigned long flags;
395a1d767c1SQu Wenruo 
396a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
39772a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
39872a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
399a1d767c1SQu Wenruo 		SetPageUptodate(page);
400a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
401a1d767c1SQu Wenruo }
402a1d767c1SQu Wenruo 
403a1d767c1SQu Wenruo void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
404a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
405a1d767c1SQu Wenruo {
406a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
40772a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
40872a69cd0SQu Wenruo 							uptodate, start, len);
409a1d767c1SQu Wenruo 	unsigned long flags;
410a1d767c1SQu Wenruo 
411a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
41272a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
413a1d767c1SQu Wenruo 	ClearPageUptodate(page);
414a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
415a1d767c1SQu Wenruo }
416a1d767c1SQu Wenruo 
41703a816b3SQu Wenruo void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
41803a816b3SQu Wenruo 		struct page *page, u64 start, u32 len)
41903a816b3SQu Wenruo {
42003a816b3SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
42172a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
42272a69cd0SQu Wenruo 							error, start, len);
42303a816b3SQu Wenruo 	unsigned long flags;
42403a816b3SQu Wenruo 
42503a816b3SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
42672a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
42703a816b3SQu Wenruo 	SetPageError(page);
42803a816b3SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
42903a816b3SQu Wenruo }
43003a816b3SQu Wenruo 
43103a816b3SQu Wenruo void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
43203a816b3SQu Wenruo 		struct page *page, u64 start, u32 len)
43303a816b3SQu Wenruo {
43403a816b3SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
43572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
43672a69cd0SQu Wenruo 							error, start, len);
43703a816b3SQu Wenruo 	unsigned long flags;
43803a816b3SQu Wenruo 
43903a816b3SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
44072a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
44172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
44203a816b3SQu Wenruo 		ClearPageError(page);
44303a816b3SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
44403a816b3SQu Wenruo }
44503a816b3SQu Wenruo 
446d8a5713eSQu Wenruo void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
447d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
448d8a5713eSQu Wenruo {
449d8a5713eSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
45072a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
45172a69cd0SQu Wenruo 							dirty, start, len);
452d8a5713eSQu Wenruo 	unsigned long flags;
453d8a5713eSQu Wenruo 
454d8a5713eSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
45572a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
456d8a5713eSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
457d8a5713eSQu Wenruo 	set_page_dirty(page);
458d8a5713eSQu Wenruo }
459d8a5713eSQu Wenruo 
460d8a5713eSQu Wenruo /*
461d8a5713eSQu Wenruo  * Extra clear_and_test function for subpage dirty bitmap.
462d8a5713eSQu Wenruo  *
463d8a5713eSQu Wenruo  * Return true if we cleared the last dirty bits in the dirty bitmap
464d8a5713eSQu Wenruo  * (i.e. the bitmap is now all zero).
465d8a5713eSQu Wenruo  * Return false otherwise.
466d8a5713eSQu Wenruo  *
467d8a5713eSQu Wenruo  * NOTE: In the true case, callers should manually clear the page dirty flag,
468d8a5713eSQu Wenruo  * as we have extra handling for tree blocks.
469d8a5713eSQu Wenruo  */
470d8a5713eSQu Wenruo bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
471d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
472d8a5713eSQu Wenruo {
473d8a5713eSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
47472a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
47572a69cd0SQu Wenruo 							dirty, start, len);
476d8a5713eSQu Wenruo 	unsigned long flags;
477d8a5713eSQu Wenruo 	bool last = false;
478d8a5713eSQu Wenruo 
479d8a5713eSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
48072a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
48172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
482d8a5713eSQu Wenruo 		last = true;
483d8a5713eSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
484d8a5713eSQu Wenruo 	return last;
485d8a5713eSQu Wenruo }
486d8a5713eSQu Wenruo 
487d8a5713eSQu Wenruo void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
488d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
489d8a5713eSQu Wenruo {
490d8a5713eSQu Wenruo 	bool last;
491d8a5713eSQu Wenruo 
492d8a5713eSQu Wenruo 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
493d8a5713eSQu Wenruo 	if (last)
494d8a5713eSQu Wenruo 		clear_page_dirty_for_io(page);
495d8a5713eSQu Wenruo }
496d8a5713eSQu Wenruo 
4973470da3bSQu Wenruo void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
4983470da3bSQu Wenruo 		struct page *page, u64 start, u32 len)
4993470da3bSQu Wenruo {
5003470da3bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
50172a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
50272a69cd0SQu Wenruo 							writeback, start, len);
5033470da3bSQu Wenruo 	unsigned long flags;
5043470da3bSQu Wenruo 
5053470da3bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
50672a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
5073470da3bSQu Wenruo 	set_page_writeback(page);
5083470da3bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5093470da3bSQu Wenruo }
5103470da3bSQu Wenruo 
5113470da3bSQu Wenruo void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
5123470da3bSQu Wenruo 		struct page *page, u64 start, u32 len)
5133470da3bSQu Wenruo {
5143470da3bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
51572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
51672a69cd0SQu Wenruo 							writeback, start, len);
5173470da3bSQu Wenruo 	unsigned long flags;
5183470da3bSQu Wenruo 
5193470da3bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
52072a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
52172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
5227c11d0aeSQu Wenruo 		ASSERT(PageWriteback(page));
5233470da3bSQu Wenruo 		end_page_writeback(page);
5247c11d0aeSQu Wenruo 	}
5253470da3bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5263470da3bSQu Wenruo }
5273470da3bSQu Wenruo 
5286f17400bSQu Wenruo void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
5296f17400bSQu Wenruo 		struct page *page, u64 start, u32 len)
5306f17400bSQu Wenruo {
5316f17400bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
53272a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
53372a69cd0SQu Wenruo 							ordered, start, len);
5346f17400bSQu Wenruo 	unsigned long flags;
5356f17400bSQu Wenruo 
5366f17400bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
53772a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
5386f17400bSQu Wenruo 	SetPageOrdered(page);
5396f17400bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5406f17400bSQu Wenruo }
5416f17400bSQu Wenruo 
5426f17400bSQu Wenruo void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
5436f17400bSQu Wenruo 		struct page *page, u64 start, u32 len)
5446f17400bSQu Wenruo {
5456f17400bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
54672a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
54772a69cd0SQu Wenruo 							ordered, start, len);
5486f17400bSQu Wenruo 	unsigned long flags;
5496f17400bSQu Wenruo 
5506f17400bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
55172a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
55272a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
5536f17400bSQu Wenruo 		ClearPageOrdered(page);
5546f17400bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5556f17400bSQu Wenruo }
556e4f94347SQu Wenruo 
557e4f94347SQu Wenruo void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
558e4f94347SQu Wenruo 			       struct page *page, u64 start, u32 len)
559e4f94347SQu Wenruo {
560e4f94347SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
561e4f94347SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
562e4f94347SQu Wenruo 							checked, start, len);
563e4f94347SQu Wenruo 	unsigned long flags;
564e4f94347SQu Wenruo 
565e4f94347SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
566e4f94347SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
567e4f94347SQu Wenruo 	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
568e4f94347SQu Wenruo 		SetPageChecked(page);
569e4f94347SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
570e4f94347SQu Wenruo }
571e4f94347SQu Wenruo 
572e4f94347SQu Wenruo void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
573e4f94347SQu Wenruo 				 struct page *page, u64 start, u32 len)
574e4f94347SQu Wenruo {
575e4f94347SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
576e4f94347SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
577e4f94347SQu Wenruo 							checked, start, len);
578e4f94347SQu Wenruo 	unsigned long flags;
579e4f94347SQu Wenruo 
580e4f94347SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
581e4f94347SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
582e4f94347SQu Wenruo 	ClearPageChecked(page);
583e4f94347SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
584e4f94347SQu Wenruo }
585e4f94347SQu Wenruo 
586a1d767c1SQu Wenruo /*
587a1d767c1SQu Wenruo  * Unlike set/clear, which differ depending on each page status flag, all test
588a1d767c1SQu Wenruo  * operations work in the same way.
589a1d767c1SQu Wenruo  */
590a1d767c1SQu Wenruo #define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
591a1d767c1SQu Wenruo bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
592a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
593a1d767c1SQu Wenruo {									\
594a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
59572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
59672a69cd0SQu Wenruo 						name, start, len);	\
597a1d767c1SQu Wenruo 	unsigned long flags;						\
598a1d767c1SQu Wenruo 	bool ret;							\
599a1d767c1SQu Wenruo 									\
600a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);			\
60172a69cd0SQu Wenruo 	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
60272a69cd0SQu Wenruo 				len >> fs_info->sectorsize_bits);	\
603a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);			\
604a1d767c1SQu Wenruo 	return ret;							\
605a1d767c1SQu Wenruo }
606a1d767c1SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
60703a816b3SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
608d8a5713eSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
6093470da3bSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
6106f17400bSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
611e4f94347SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
612a1d767c1SQu Wenruo 
613a1d767c1SQu Wenruo /*
614a1d767c1SQu Wenruo  * Note that in selftests (extent-io-tests), we can have a NULL fs_info passed
615a1d767c1SQu Wenruo  * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
616a1d767c1SQu Wenruo  * back to the regular sectorsize branch.
617a1d767c1SQu Wenruo  */
618a1d767c1SQu Wenruo #define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
619a1d767c1SQu Wenruo 			       test_page_func)				\
620a1d767c1SQu Wenruo void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
621a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
622a1d767c1SQu Wenruo {									\
623a1d767c1SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
624a1d767c1SQu Wenruo 		set_page_func(page);					\
625a1d767c1SQu Wenruo 		return;							\
626a1d767c1SQu Wenruo 	}								\
627a1d767c1SQu Wenruo 	btrfs_subpage_set_##name(fs_info, page, start, len);		\
628a1d767c1SQu Wenruo }									\
629a1d767c1SQu Wenruo void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
630a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
631a1d767c1SQu Wenruo {									\
632a1d767c1SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
633a1d767c1SQu Wenruo 		clear_page_func(page);					\
634a1d767c1SQu Wenruo 		return;							\
635a1d767c1SQu Wenruo 	}								\
636a1d767c1SQu Wenruo 	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
637a1d767c1SQu Wenruo }									\
638a1d767c1SQu Wenruo bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
639a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
640a1d767c1SQu Wenruo {									\
641a1d767c1SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
642a1d767c1SQu Wenruo 		return test_page_func(page);				\
643a1d767c1SQu Wenruo 	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
64460e2d255SQu Wenruo }									\
64560e2d255SQu Wenruo void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
64660e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
64760e2d255SQu Wenruo {									\
64860e2d255SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
64960e2d255SQu Wenruo 		set_page_func(page);					\
65060e2d255SQu Wenruo 		return;							\
65160e2d255SQu Wenruo 	}								\
65260e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
65360e2d255SQu Wenruo 	btrfs_subpage_set_##name(fs_info, page, start, len);		\
65460e2d255SQu Wenruo }									\
65560e2d255SQu Wenruo void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
65660e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
65760e2d255SQu Wenruo {									\
65860e2d255SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
65960e2d255SQu Wenruo 		clear_page_func(page);					\
66060e2d255SQu Wenruo 		return;							\
66160e2d255SQu Wenruo 	}								\
66260e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
66360e2d255SQu Wenruo 	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
66460e2d255SQu Wenruo }									\
66560e2d255SQu Wenruo bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
66660e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
66760e2d255SQu Wenruo {									\
66860e2d255SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
66960e2d255SQu Wenruo 		return test_page_func(page);				\
67060e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
67160e2d255SQu Wenruo 	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
672a1d767c1SQu Wenruo }
673a1d767c1SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
674a1d767c1SQu Wenruo 			 PageUptodate);
67503a816b3SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
676d8a5713eSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
677d8a5713eSQu Wenruo 			 PageDirty);
6783470da3bSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
6793470da3bSQu Wenruo 			 PageWriteback);
6806f17400bSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
6816f17400bSQu Wenruo 			 PageOrdered);
682e4f94347SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
683cc1d0d93SQu Wenruo 
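/*
 * For each flag above, IMPLEMENT_BTRFS_PAGE_OPS() generates six wrappers.
 * Taking "dirty" as an example, the generated helpers are:
 *
 *	btrfs_page_set_dirty()		btrfs_page_clamp_set_dirty()
 *	btrfs_page_clear_dirty()	btrfs_page_clamp_clear_dirty()
 *	btrfs_page_test_dirty()		btrfs_page_clamp_test_dirty()
 *
 * The clamp_*() variants first clamp [start, start + len) to the page
 * boundaries before calling the subpage helpers, and all of them fall back
 * to the plain page flag helpers when sectorsize == PAGE_SIZE.
 */
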
684cc1d0d93SQu Wenruo /*
685cc1d0d93SQu Wenruo  * Make sure that not only the page dirty bit is cleared, but also the
686cc1d0d93SQu Wenruo  * subpage dirty bit.
687cc1d0d93SQu Wenruo  */
688cc1d0d93SQu Wenruo void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
689cc1d0d93SQu Wenruo 				 struct page *page)
690cc1d0d93SQu Wenruo {
691cc1d0d93SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
692cc1d0d93SQu Wenruo 
693cc1d0d93SQu Wenruo 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
694cc1d0d93SQu Wenruo 		return;
695cc1d0d93SQu Wenruo 
696cc1d0d93SQu Wenruo 	ASSERT(!PageDirty(page));
697cc1d0d93SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
698cc1d0d93SQu Wenruo 		return;
699cc1d0d93SQu Wenruo 
700cc1d0d93SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
70172a69cd0SQu Wenruo 	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
702cc1d0d93SQu Wenruo }
703e55a0de1SQu Wenruo 
704e55a0de1SQu Wenruo /*
705e55a0de1SQu Wenruo  * Handle different locked pages with different page sizes:
706e55a0de1SQu Wenruo  *
707e55a0de1SQu Wenruo  * - Page locked by plain lock_page()
708e55a0de1SQu Wenruo  *   It should not have any subpage::writers count.
709e55a0de1SQu Wenruo  *   Can be unlocked by unlock_page().
710e55a0de1SQu Wenruo  *   This is the most common locked page for __extent_writepage() called
711e55a0de1SQu Wenruo  *   inside extent_write_cache_pages() or extent_write_full_page().
712e55a0de1SQu Wenruo  *   Rarer cases include the @locked_page from extent_write_locked_range().
713e55a0de1SQu Wenruo  *
714e55a0de1SQu Wenruo  * - Page locked by lock_delalloc_pages()
715e55a0de1SQu Wenruo  *   There is only one caller, all pages except @locked_page for
716e55a0de1SQu Wenruo  *   extent_write_locked_range().
717e55a0de1SQu Wenruo  *   In this case, we have to call the subpage helper to handle it.
718e55a0de1SQu Wenruo  */
719e55a0de1SQu Wenruo void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
720e55a0de1SQu Wenruo 			      u64 start, u32 len)
721e55a0de1SQu Wenruo {
722e55a0de1SQu Wenruo 	struct btrfs_subpage *subpage;
723e55a0de1SQu Wenruo 
724e55a0de1SQu Wenruo 	ASSERT(PageLocked(page));
725e55a0de1SQu Wenruo 	/* For regular page size case, we just unlock the page */
726e55a0de1SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
727e55a0de1SQu Wenruo 		return unlock_page(page);
728e55a0de1SQu Wenruo 
729e55a0de1SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
730e55a0de1SQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
731e55a0de1SQu Wenruo 
732e55a0de1SQu Wenruo 	/*
733e55a0de1SQu Wenruo 	 * For the subpage case, there are two types of locked page: with or
734e55a0de1SQu Wenruo 	 * without a writers count.
735e55a0de1SQu Wenruo 	 *
736e55a0de1SQu Wenruo 	 * Since we own the page lock, no one else could touch subpage::writers
737e55a0de1SQu Wenruo 	 * and we are safe to do several atomic operations without the spinlock.
738e55a0de1SQu Wenruo 	 */
739*c992fa1fSQu Wenruo 	if (atomic_read(&subpage->writers) == 0)
740e55a0de1SQu Wenruo 		/* No writers, locked by plain lock_page() */
741e55a0de1SQu Wenruo 		return unlock_page(page);
742e55a0de1SQu Wenruo 
743e55a0de1SQu Wenruo 	/* Have writers, use proper subpage helper to end it */
744e55a0de1SQu Wenruo 	btrfs_page_end_writer_lock(fs_info, page, start, len);
745e55a0de1SQu Wenruo }
746