xref: /openbmc/linux/fs/btrfs/subpage.c (revision e55a0de1)
1cac06d84SQu Wenruo // SPDX-License-Identifier: GPL-2.0
2cac06d84SQu Wenruo 
3cac06d84SQu Wenruo #include <linux/slab.h>
4cac06d84SQu Wenruo #include "ctree.h"
5cac06d84SQu Wenruo #include "subpage.h"
63d078efaSQu Wenruo #include "btrfs_inode.h"
7cac06d84SQu Wenruo 
8894d1378SQu Wenruo /*
9894d1378SQu Wenruo  * Subpage (sectorsize < PAGE_SIZE) support overview:
10894d1378SQu Wenruo  *
11894d1378SQu Wenruo  * Limitations:
12894d1378SQu Wenruo  *
13894d1378SQu Wenruo  * - Only support 64K page size for now
14894d1378SQu Wenruo  *   This is to make metadata handling easier, as a 64K page ensures that
15894d1378SQu Wenruo  *   all nodesizes fit inside one page, thus we don't need to handle
16894d1378SQu Wenruo  *   cases where a tree block crosses several pages.
17894d1378SQu Wenruo  *
18894d1378SQu Wenruo  * - Only metadata read-write for now
19894d1378SQu Wenruo  *   The data read-write part is in development.
20894d1378SQu Wenruo  *
21894d1378SQu Wenruo  * - Metadata can't cross a 64K page boundary
22894d1378SQu Wenruo  *   btrfs-progs and the kernel have enforced this for a while, thus only
23894d1378SQu Wenruo  *   ancient filesystems could have such a problem.  For such a case, do a
24894d1378SQu Wenruo  *   graceful rejection.
25894d1378SQu Wenruo  *
26894d1378SQu Wenruo  * Special behavior:
27894d1378SQu Wenruo  *
28894d1378SQu Wenruo  * - Metadata
29894d1378SQu Wenruo  *   Metadata read is fully supported.
30894d1378SQu Wenruo  *   Meaning that reading one tree block only triggers the read for the
31894d1378SQu Wenruo  *   needed range; other unrelated ranges in the same page will not be touched.
32894d1378SQu Wenruo  *
33894d1378SQu Wenruo  *   Metadata write support is partial.
34894d1378SQu Wenruo  *   The writeback is still for the full page, but we will only submit
35894d1378SQu Wenruo  *   the dirty extent buffers in the page.
36894d1378SQu Wenruo  *
37894d1378SQu Wenruo  *   This means, if we have a metadata page like this:
38894d1378SQu Wenruo  *
39894d1378SQu Wenruo  *   Page offset
40894d1378SQu Wenruo  *   0         16K         32K         48K        64K
41894d1378SQu Wenruo  *   |/////////|           |///////////|
42894d1378SQu Wenruo  *        \- Tree block A        \- Tree block B
43894d1378SQu Wenruo  *
44894d1378SQu Wenruo  *   Even if we just want to writeback tree block A, we will also writeback
45894d1378SQu Wenruo  *   tree block B if it's also dirty.
46894d1378SQu Wenruo  *
47894d1378SQu Wenruo  *   This may cause extra metadata writeback, which results in more COW.
48894d1378SQu Wenruo  *
49894d1378SQu Wenruo  * Implementation:
50894d1378SQu Wenruo  *
51894d1378SQu Wenruo  * - Common
52894d1378SQu Wenruo  *   Both metadata and data will use a new structure, btrfs_subpage, to
53894d1378SQu Wenruo  *   record the status of each sector inside a page.  This provides the extra
54894d1378SQu Wenruo  *   granularity needed.
55894d1378SQu Wenruo  *
56894d1378SQu Wenruo  * - Metadata
57894d1378SQu Wenruo  *   Since we have multiple tree blocks inside one page, we can't rely on page
58894d1378SQu Wenruo  *   locking anymore, or we will have greatly reduced concurrency or even
59894d1378SQu Wenruo  *   deadlocks (holding one tree lock while trying to lock another tree block
60894d1378SQu Wenruo  *   in the same page).
61894d1378SQu Wenruo  *
62894d1378SQu Wenruo  *   Thus for metadata locking, subpage support relies on io_tree locking only.
63894d1378SQu Wenruo  *   This means a slightly higher tree locking latency.
64894d1378SQu Wenruo  */
65894d1378SQu Wenruo 
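/*
 * To make the diagram above concrete, here is an illustrative example
 * (assuming a 4K sectorsize): the 64K page is tracked as 16 sectors, and if
 * both tree block A (0-16K) and tree block B (32K-48K) are dirty, the subpage
 * dirty bitmap of that page has bits 0-3 and 8-11 set:
 *
 *   sector: 0 1 2 3 4 5 6 7 8 9 ...
 *   dirty:  1 1 1 1 0 0 0 0 1 1 1 1 0 0 0 0
 *
 * Writing back the page then submits both tree blocks, as described above.
 */
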
668481dd80SQu Wenruo void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
678481dd80SQu Wenruo {
688481dd80SQu Wenruo 	unsigned int cur = 0;
698481dd80SQu Wenruo 	unsigned int nr_bits;
708481dd80SQu Wenruo 
718481dd80SQu Wenruo 	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));
728481dd80SQu Wenruo 
738481dd80SQu Wenruo 	nr_bits = PAGE_SIZE / sectorsize;
748481dd80SQu Wenruo 	subpage_info->bitmap_nr_bits = nr_bits;
758481dd80SQu Wenruo 
768481dd80SQu Wenruo 	subpage_info->uptodate_offset = cur;
778481dd80SQu Wenruo 	cur += nr_bits;
788481dd80SQu Wenruo 
798481dd80SQu Wenruo 	subpage_info->error_offset = cur;
808481dd80SQu Wenruo 	cur += nr_bits;
818481dd80SQu Wenruo 
828481dd80SQu Wenruo 	subpage_info->dirty_offset = cur;
838481dd80SQu Wenruo 	cur += nr_bits;
848481dd80SQu Wenruo 
858481dd80SQu Wenruo 	subpage_info->writeback_offset = cur;
868481dd80SQu Wenruo 	cur += nr_bits;
878481dd80SQu Wenruo 
888481dd80SQu Wenruo 	subpage_info->ordered_offset = cur;
898481dd80SQu Wenruo 	cur += nr_bits;
908481dd80SQu Wenruo 
91e4f94347SQu Wenruo 	subpage_info->checked_offset = cur;
92e4f94347SQu Wenruo 	cur += nr_bits;
93e4f94347SQu Wenruo 
948481dd80SQu Wenruo 	subpage_info->total_nr_bits = cur;
958481dd80SQu Wenruo }
968481dd80SQu Wenruo 
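/*
 * For example, with a 4K sectorsize on a 64K page, nr_bits = 16 and the
 * resulting layout inside the single subpage bitmap array is:
 *
 *   uptodate_offset  =  0   (bits  0..15)
 *   error_offset     = 16   (bits 16..31)
 *   dirty_offset     = 32   (bits 32..47)
 *   writeback_offset = 48   (bits 48..63)
 *   ordered_offset   = 64   (bits 64..79)
 *   checked_offset   = 80   (bits 80..95)
 *   total_nr_bits    = 96
 */
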
97cac06d84SQu Wenruo int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
98cac06d84SQu Wenruo 			 struct page *page, enum btrfs_subpage_type type)
99cac06d84SQu Wenruo {
100651fb419SQu Wenruo 	struct btrfs_subpage *subpage;
101cac06d84SQu Wenruo 
102cac06d84SQu Wenruo 	/*
103cac06d84SQu Wenruo 	 * We have cases like a dummy extent buffer page, which is not mapped
104cac06d84SQu Wenruo 	 * and doesn't need to be locked.
105cac06d84SQu Wenruo 	 */
106cac06d84SQu Wenruo 	if (page->mapping)
107cac06d84SQu Wenruo 		ASSERT(PageLocked(page));
108651fb419SQu Wenruo 
109cac06d84SQu Wenruo 	/* Either not subpage, or the page already has private attached */
110cac06d84SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
111cac06d84SQu Wenruo 		return 0;
112cac06d84SQu Wenruo 
113651fb419SQu Wenruo 	subpage = btrfs_alloc_subpage(fs_info, type);
114651fb419SQu Wenruo 	if (IS_ERR(subpage))
115651fb419SQu Wenruo 		return PTR_ERR(subpage);
116651fb419SQu Wenruo 
117cac06d84SQu Wenruo 	attach_page_private(page, subpage);
118cac06d84SQu Wenruo 	return 0;
119cac06d84SQu Wenruo }
120cac06d84SQu Wenruo 
121cac06d84SQu Wenruo void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
122cac06d84SQu Wenruo 			  struct page *page)
123cac06d84SQu Wenruo {
124cac06d84SQu Wenruo 	struct btrfs_subpage *subpage;
125cac06d84SQu Wenruo 
126cac06d84SQu Wenruo 	/* Either not subpage, or already detached */
127cac06d84SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
128cac06d84SQu Wenruo 		return;
129cac06d84SQu Wenruo 
130cac06d84SQu Wenruo 	subpage = (struct btrfs_subpage *)detach_page_private(page);
131cac06d84SQu Wenruo 	ASSERT(subpage);
132760f991fSQu Wenruo 	btrfs_free_subpage(subpage);
133760f991fSQu Wenruo }
134760f991fSQu Wenruo 
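/*
 * A rough sketch of how callers are expected to pair these helpers (the
 * function below is illustrative only, not an existing call site):
 *
 *	static int demo_prepare_page(const struct btrfs_fs_info *fs_info,
 *				     struct page *page)
 *	{
 *		lock_page(page);
 *		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
 *	}
 *
 * btrfs_attach_subpage() is a no-op for regular sectorsize or when the page
 * already has private data attached, and btrfs_detach_subpage() is called
 * when the page is released or invalidated, freeing the btrfs_subpage.
 */
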
135651fb419SQu Wenruo struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
136760f991fSQu Wenruo 					  enum btrfs_subpage_type type)
137760f991fSQu Wenruo {
138651fb419SQu Wenruo 	struct btrfs_subpage *ret;
13972a69cd0SQu Wenruo 	unsigned int real_size;
140651fb419SQu Wenruo 
141fdf250dbSQu Wenruo 	ASSERT(fs_info->sectorsize < PAGE_SIZE);
142760f991fSQu Wenruo 
14372a69cd0SQu Wenruo 	real_size = struct_size(ret, bitmaps,
14472a69cd0SQu Wenruo 			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
14572a69cd0SQu Wenruo 	ret = kzalloc(real_size, GFP_NOFS);
146651fb419SQu Wenruo 	if (!ret)
147651fb419SQu Wenruo 		return ERR_PTR(-ENOMEM);
148651fb419SQu Wenruo 
149651fb419SQu Wenruo 	spin_lock_init(&ret->lock);
1501e1de387SQu Wenruo 	if (type == BTRFS_SUBPAGE_METADATA) {
151651fb419SQu Wenruo 		atomic_set(&ret->eb_refs, 0);
1521e1de387SQu Wenruo 	} else {
153651fb419SQu Wenruo 		atomic_set(&ret->readers, 0);
154651fb419SQu Wenruo 		atomic_set(&ret->writers, 0);
1551e1de387SQu Wenruo 	}
156651fb419SQu Wenruo 	return ret;
157760f991fSQu Wenruo }
158760f991fSQu Wenruo 
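/*
 * Note on the allocation above: struct_size(ret, bitmaps, n) evaluates to
 * sizeof(*ret) + n * sizeof(ret->bitmaps[0]) with overflow protection, so one
 * allocation covers the fixed part of struct btrfs_subpage plus the shared
 * bitmap array.  With the 4K-sector-in-64K-page example above,
 * BITS_TO_LONGS(96) == 2, i.e. two extra unsigned longs are allocated.
 */
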
159760f991fSQu Wenruo void btrfs_free_subpage(struct btrfs_subpage *subpage)
160760f991fSQu Wenruo {
161cac06d84SQu Wenruo 	kfree(subpage);
162cac06d84SQu Wenruo }
1638ff8466dSQu Wenruo 
1648ff8466dSQu Wenruo /*
1658ff8466dSQu Wenruo  * Increase the eb_refs of the current subpage.
1668ff8466dSQu Wenruo  *
1678ff8466dSQu Wenruo  * This is important for eb allocation, to prevent a race with the last eb
1688ff8466dSQu Wenruo  * freeing of the same page.
1698ff8466dSQu Wenruo  * With the eb_refs increased before the eb is inserted into the radix tree,
1708ff8466dSQu Wenruo  * detach_extent_buffer_page() won't detach the page private while we're still
1718ff8466dSQu Wenruo  * allocating the extent buffer.
1728ff8466dSQu Wenruo  */
1738ff8466dSQu Wenruo void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
1748ff8466dSQu Wenruo 			    struct page *page)
1758ff8466dSQu Wenruo {
1768ff8466dSQu Wenruo 	struct btrfs_subpage *subpage;
1778ff8466dSQu Wenruo 
1788ff8466dSQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
1798ff8466dSQu Wenruo 		return;
1808ff8466dSQu Wenruo 
1818ff8466dSQu Wenruo 	ASSERT(PagePrivate(page) && page->mapping);
1828ff8466dSQu Wenruo 	lockdep_assert_held(&page->mapping->private_lock);
1838ff8466dSQu Wenruo 
1848ff8466dSQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
1858ff8466dSQu Wenruo 	atomic_inc(&subpage->eb_refs);
1868ff8466dSQu Wenruo }
1878ff8466dSQu Wenruo 
1888ff8466dSQu Wenruo void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
1898ff8466dSQu Wenruo 			    struct page *page)
1908ff8466dSQu Wenruo {
1918ff8466dSQu Wenruo 	struct btrfs_subpage *subpage;
1928ff8466dSQu Wenruo 
1938ff8466dSQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
1948ff8466dSQu Wenruo 		return;
1958ff8466dSQu Wenruo 
1968ff8466dSQu Wenruo 	ASSERT(PagePrivate(page) && page->mapping);
1978ff8466dSQu Wenruo 	lockdep_assert_held(&page->mapping->private_lock);
1988ff8466dSQu Wenruo 
1998ff8466dSQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
2008ff8466dSQu Wenruo 	ASSERT(atomic_read(&subpage->eb_refs));
2018ff8466dSQu Wenruo 	atomic_dec(&subpage->eb_refs);
2028ff8466dSQu Wenruo }
203a1d767c1SQu Wenruo 
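/*
 * A rough sketch of the intended pairing (illustrative only, the real callers
 * live in the extent buffer code):
 *
 *	spin_lock(&page->mapping->private_lock);
 *	btrfs_page_inc_eb_refs(fs_info, page);
 *	spin_unlock(&page->mapping->private_lock);
 *	... the eb now holds a reference on the page's subpage structure ...
 *	spin_lock(&page->mapping->private_lock);
 *	btrfs_page_dec_eb_refs(fs_info, page);
 *	spin_unlock(&page->mapping->private_lock);
 *
 * As long as eb_refs is non-zero, the page private (the btrfs_subpage) must
 * not be detached.
 */
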
20492082d40SQu Wenruo static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
20592082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
20692082d40SQu Wenruo {
20792082d40SQu Wenruo 	/* Basic checks */
20892082d40SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
20992082d40SQu Wenruo 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
21092082d40SQu Wenruo 	       IS_ALIGNED(len, fs_info->sectorsize));
21192082d40SQu Wenruo 	/*
21292082d40SQu Wenruo 	 * The range check only works for mapped pages; we can still have
21392082d40SQu Wenruo 	 * unmapped pages like dummy extent buffer pages.
21492082d40SQu Wenruo 	 */
21592082d40SQu Wenruo 	if (page->mapping)
21692082d40SQu Wenruo 		ASSERT(page_offset(page) <= start &&
21792082d40SQu Wenruo 		       start + len <= page_offset(page) + PAGE_SIZE);
21892082d40SQu Wenruo }
21992082d40SQu Wenruo 
22092082d40SQu Wenruo void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
22192082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
22292082d40SQu Wenruo {
22392082d40SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
22492082d40SQu Wenruo 	const int nbits = len >> fs_info->sectorsize_bits;
22592082d40SQu Wenruo 
22692082d40SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
22792082d40SQu Wenruo 
2283d078efaSQu Wenruo 	atomic_add(nbits, &subpage->readers);
22992082d40SQu Wenruo }
23092082d40SQu Wenruo 
23192082d40SQu Wenruo void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
23292082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
23392082d40SQu Wenruo {
23492082d40SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
23592082d40SQu Wenruo 	const int nbits = len >> fs_info->sectorsize_bits;
2363d078efaSQu Wenruo 	bool is_data;
2373d078efaSQu Wenruo 	bool last;
23892082d40SQu Wenruo 
23992082d40SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2403d078efaSQu Wenruo 	is_data = is_data_inode(page->mapping->host);
24192082d40SQu Wenruo 	ASSERT(atomic_read(&subpage->readers) >= nbits);
2423d078efaSQu Wenruo 	last = atomic_sub_and_test(nbits, &subpage->readers);
2433d078efaSQu Wenruo 
2443d078efaSQu Wenruo 	/*
2453d078efaSQu Wenruo 	 * For data we need to unlock the page if the last read has finished.
2463d078efaSQu Wenruo 	 *
2473d078efaSQu Wenruo 	 * And please don't replace @last with an atomic_sub_and_test() call
2483d078efaSQu Wenruo 	 * inside the if () condition, as we want the atomic_sub_and_test()
2493d078efaSQu Wenruo 	 * to always be executed.
2503d078efaSQu Wenruo 	 */
2513d078efaSQu Wenruo 	if (is_data && last)
25292082d40SQu Wenruo 		unlock_page(page);
25392082d40SQu Wenruo }
25492082d40SQu Wenruo 
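/*
 * A rough sketch of how the reader count is meant to be used in the read
 * path (illustrative, not a specific call site):
 *
 *	btrfs_subpage_start_reader(fs_info, page, cur, len);
 *	... submit the read bio for [cur, cur + len) ...
 *	and in the read end I/O handler:
 *	btrfs_subpage_end_reader(fs_info, page, cur, len);
 *
 * For data pages, the btrfs_subpage_end_reader() call that drops the count to
 * zero also unlocks the page, replacing the plain unlock_page() done at end
 * of I/O in the regular sectorsize case.
 */
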
2551e1de387SQu Wenruo static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
2561e1de387SQu Wenruo {
2571e1de387SQu Wenruo 	u64 orig_start = *start;
2581e1de387SQu Wenruo 	u32 orig_len = *len;
2591e1de387SQu Wenruo 
2601e1de387SQu Wenruo 	*start = max_t(u64, page_offset(page), orig_start);
261e4f94347SQu Wenruo 	/*
262e4f94347SQu Wenruo 	 * For certain call sites like btrfs_drop_pages(), we may have pages
263e4f94347SQu Wenruo 	 * beyond the target range.  In that case, just set @len to 0; subpage
264e4f94347SQu Wenruo 	 * helpers can handle @len == 0 without any problem.
265e4f94347SQu Wenruo 	 */
266e4f94347SQu Wenruo 	if (page_offset(page) >= orig_start + orig_len)
267e4f94347SQu Wenruo 		*len = 0;
268e4f94347SQu Wenruo 	else
2691e1de387SQu Wenruo 		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
2701e1de387SQu Wenruo 			     orig_start + orig_len) - *start;
2711e1de387SQu Wenruo }
2721e1de387SQu Wenruo 
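/*
 * A worked example of the clamping above, assuming a 64K page at file offset
 * 128K:
 *
 * - Range [120K, 152K): start is clamped up to 128K, the end stays at 152K,
 *   so the result is start = 128K, len = 24K.
 * - Range [64K, 120K): the page starts at 128K, beyond the end of the range,
 *   so @len becomes 0 and the subpage helpers treat the call as a no-op.
 */
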
2731e1de387SQu Wenruo void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
2741e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
2751e1de387SQu Wenruo {
2761e1de387SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
2771e1de387SQu Wenruo 	const int nbits = (len >> fs_info->sectorsize_bits);
2781e1de387SQu Wenruo 	int ret;
2791e1de387SQu Wenruo 
2801e1de387SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2811e1de387SQu Wenruo 
2821e1de387SQu Wenruo 	ASSERT(atomic_read(&subpage->readers) == 0);
2831e1de387SQu Wenruo 	ret = atomic_add_return(nbits, &subpage->writers);
2841e1de387SQu Wenruo 	ASSERT(ret == nbits);
2851e1de387SQu Wenruo }
2861e1de387SQu Wenruo 
2871e1de387SQu Wenruo bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
2881e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
2891e1de387SQu Wenruo {
2901e1de387SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
2911e1de387SQu Wenruo 	const int nbits = (len >> fs_info->sectorsize_bits);
2921e1de387SQu Wenruo 
2931e1de387SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2941e1de387SQu Wenruo 
2951e1de387SQu Wenruo 	ASSERT(atomic_read(&subpage->writers) >= nbits);
2961e1de387SQu Wenruo 	return atomic_sub_and_test(nbits, &subpage->writers);
2971e1de387SQu Wenruo }
2981e1de387SQu Wenruo 
2991e1de387SQu Wenruo /*
3001e1de387SQu Wenruo  * Lock a page for delalloc page writeback.
3011e1de387SQu Wenruo  *
3021e1de387SQu Wenruo  * Return -EAGAIN if the page is not properly initialized.
3031e1de387SQu Wenruo  * Return 0 with the page locked, and the writer counter updated.
3041e1de387SQu Wenruo  *
3051e1de387SQu Wenruo  * Even with 0 returned, the page still needs an extra check to make sure
3061e1de387SQu Wenruo  * it's really the correct page, as the caller is using
3071e1de387SQu Wenruo  * find_get_pages_contig(), which can race with page invalidation.
3081e1de387SQu Wenruo  */
3091e1de387SQu Wenruo int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
3101e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3111e1de387SQu Wenruo {
3121e1de387SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
3131e1de387SQu Wenruo 		lock_page(page);
3141e1de387SQu Wenruo 		return 0;
3151e1de387SQu Wenruo 	}
3161e1de387SQu Wenruo 	lock_page(page);
3171e1de387SQu Wenruo 	if (!PagePrivate(page) || !page->private) {
3181e1de387SQu Wenruo 		unlock_page(page);
3191e1de387SQu Wenruo 		return -EAGAIN;
3201e1de387SQu Wenruo 	}
3211e1de387SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);
3221e1de387SQu Wenruo 	btrfs_subpage_start_writer(fs_info, page, start, len);
3231e1de387SQu Wenruo 	return 0;
3241e1de387SQu Wenruo }
3251e1de387SQu Wenruo 
3261e1de387SQu Wenruo void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
3271e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3281e1de387SQu Wenruo {
3291e1de387SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
3301e1de387SQu Wenruo 		return unlock_page(page);
3311e1de387SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);
3321e1de387SQu Wenruo 	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
3331e1de387SQu Wenruo 		unlock_page(page);
3341e1de387SQu Wenruo }
3351e1de387SQu Wenruo 
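/*
 * A rough sketch of the delalloc locking sequence the two helpers above are
 * meant for (illustrative only):
 *
 *	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
 *	if (ret == -EAGAIN)
 *		... the page lost its private data, grab the page again ...
 *	... run writeback for the delalloc range ...
 *	btrfs_page_end_writer_lock(fs_info, page, start, len);
 *
 * For regular sectorsize this degenerates to lock_page()/unlock_page(); for
 * subpage the page is only unlocked once the last writer of that page calls
 * btrfs_page_end_writer_lock().
 */
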
33672a69cd0SQu Wenruo static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
33772a69cd0SQu Wenruo 				      unsigned int nbits)
338a1d767c1SQu Wenruo {
33972a69cd0SQu Wenruo 	unsigned int found_zero;
340a1d767c1SQu Wenruo 
34172a69cd0SQu Wenruo 	found_zero = find_next_zero_bit(addr, start + nbits, start);
34272a69cd0SQu Wenruo 	if (found_zero == start + nbits)
34372a69cd0SQu Wenruo 		return true;
34472a69cd0SQu Wenruo 	return false;
345a1d767c1SQu Wenruo }
346a1d767c1SQu Wenruo 
34772a69cd0SQu Wenruo static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
34872a69cd0SQu Wenruo 				       unsigned int nbits)
34972a69cd0SQu Wenruo {
35072a69cd0SQu Wenruo 	unsigned int found_set;
35172a69cd0SQu Wenruo 
35272a69cd0SQu Wenruo 	found_set = find_next_bit(addr, start + nbits, start);
35372a69cd0SQu Wenruo 	if (found_set == start + nbits)
35472a69cd0SQu Wenruo 		return true;
35572a69cd0SQu Wenruo 	return false;
35672a69cd0SQu Wenruo }
35772a69cd0SQu Wenruo 
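/*
 * For example, with a bitmap whose low byte is 0x3c (bits 2-5 set):
 *
 * - bitmap_test_range_all_set(addr, 2, 4) returns true, as no zero bit is
 *   found before bit 6.
 * - bitmap_test_range_all_set(addr, 2, 5) returns false (bit 6 is zero).
 * - bitmap_test_range_all_zero(addr, 6, 2) returns true.
 */
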
35872a69cd0SQu Wenruo #define subpage_calc_start_bit(fs_info, page, name, start, len)		\
35972a69cd0SQu Wenruo ({									\
36072a69cd0SQu Wenruo 	unsigned int start_bit;						\
36172a69cd0SQu Wenruo 									\
36272a69cd0SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);		\
36372a69cd0SQu Wenruo 	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
36472a69cd0SQu Wenruo 	start_bit += fs_info->subpage_info->name##_offset;		\
36572a69cd0SQu Wenruo 	start_bit;							\
36672a69cd0SQu Wenruo })
36772a69cd0SQu Wenruo 
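/*
 * For example, for a range starting 8K into its 64K page with a 4K
 * sectorsize, offset_in_page(start) >> sectorsize_bits gives bit 2; for the
 * dirty bitmap (dirty_offset = 32 in the layout shown earlier) the resulting
 * start_bit is therefore 34.
 */
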
36872a69cd0SQu Wenruo #define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
36972a69cd0SQu Wenruo 	bitmap_test_range_all_set(subpage->bitmaps,			\
37072a69cd0SQu Wenruo 			fs_info->subpage_info->name##_offset,		\
37172a69cd0SQu Wenruo 			fs_info->subpage_info->bitmap_nr_bits)
37272a69cd0SQu Wenruo 
37372a69cd0SQu Wenruo #define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
37472a69cd0SQu Wenruo 	bitmap_test_range_all_zero(subpage->bitmaps,			\
37572a69cd0SQu Wenruo 			fs_info->subpage_info->name##_offset,		\
37672a69cd0SQu Wenruo 			fs_info->subpage_info->bitmap_nr_bits)
37772a69cd0SQu Wenruo 
378a1d767c1SQu Wenruo void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
379a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
380a1d767c1SQu Wenruo {
381a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
38272a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
38372a69cd0SQu Wenruo 							uptodate, start, len);
384a1d767c1SQu Wenruo 	unsigned long flags;
385a1d767c1SQu Wenruo 
386a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
38772a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
38872a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
389a1d767c1SQu Wenruo 		SetPageUptodate(page);
390a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
391a1d767c1SQu Wenruo }
392a1d767c1SQu Wenruo 
393a1d767c1SQu Wenruo void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
394a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
395a1d767c1SQu Wenruo {
396a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
39772a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
39872a69cd0SQu Wenruo 							uptodate, start, len);
399a1d767c1SQu Wenruo 	unsigned long flags;
400a1d767c1SQu Wenruo 
401a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
40272a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
403a1d767c1SQu Wenruo 	ClearPageUptodate(page);
404a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
405a1d767c1SQu Wenruo }
406a1d767c1SQu Wenruo 
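/*
 * Note the asymmetry above: setting a sub-range only marks the whole page
 * uptodate once every sector of the page is uptodate, while clearing any
 * sub-range immediately clears the page flag.  E.g. after reading the first
 * 60K of a 64K page with a 4K sectorsize, 15 of the 16 uptodate bits are set
 * but the page is still !PageUptodate(); the final 4K read flips the page
 * flag as well.
 */
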
40703a816b3SQu Wenruo void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
40803a816b3SQu Wenruo 		struct page *page, u64 start, u32 len)
40903a816b3SQu Wenruo {
41003a816b3SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
41172a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
41272a69cd0SQu Wenruo 							error, start, len);
41303a816b3SQu Wenruo 	unsigned long flags;
41403a816b3SQu Wenruo 
41503a816b3SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
41672a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
41703a816b3SQu Wenruo 	SetPageError(page);
41803a816b3SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
41903a816b3SQu Wenruo }
42003a816b3SQu Wenruo 
42103a816b3SQu Wenruo void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
42203a816b3SQu Wenruo 		struct page *page, u64 start, u32 len)
42303a816b3SQu Wenruo {
42403a816b3SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
42572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
42672a69cd0SQu Wenruo 							error, start, len);
42703a816b3SQu Wenruo 	unsigned long flags;
42803a816b3SQu Wenruo 
42903a816b3SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
43072a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
43172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
43203a816b3SQu Wenruo 		ClearPageError(page);
43303a816b3SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
43403a816b3SQu Wenruo }
43503a816b3SQu Wenruo 
436d8a5713eSQu Wenruo void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
437d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
438d8a5713eSQu Wenruo {
439d8a5713eSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
44072a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
44172a69cd0SQu Wenruo 							dirty, start, len);
442d8a5713eSQu Wenruo 	unsigned long flags;
443d8a5713eSQu Wenruo 
444d8a5713eSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
44572a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
446d8a5713eSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
447d8a5713eSQu Wenruo 	set_page_dirty(page);
448d8a5713eSQu Wenruo }
449d8a5713eSQu Wenruo 
450d8a5713eSQu Wenruo /*
451d8a5713eSQu Wenruo  * Extra clear_and_test function for subpage dirty bitmap.
452d8a5713eSQu Wenruo  *
453d8a5713eSQu Wenruo  * Clear the given range in the dirty bitmap, and return true if the cleared
454d8a5713eSQu Wenruo  * bits were the last dirty ones in the bitmap (the whole dirty bitmap is now
455d8a5713eSQu Wenruo  * zero).  Return false otherwise.
456d8a5713eSQu Wenruo  *
457d8a5713eSQu Wenruo  * NOTE: Callers should manually clear the page dirty flag for the true case,
458d8a5713eSQu Wenruo  * as we have extra handling for tree blocks.
459d8a5713eSQu Wenruo  */
460d8a5713eSQu Wenruo bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
461d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
462d8a5713eSQu Wenruo {
463d8a5713eSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
46472a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
46572a69cd0SQu Wenruo 							dirty, start, len);
466d8a5713eSQu Wenruo 	unsigned long flags;
467d8a5713eSQu Wenruo 	bool last = false;
468d8a5713eSQu Wenruo 
469d8a5713eSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
47072a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
47172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
472d8a5713eSQu Wenruo 		last = true;
473d8a5713eSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
474d8a5713eSQu Wenruo 	return last;
475d8a5713eSQu Wenruo }
476d8a5713eSQu Wenruo 
477d8a5713eSQu Wenruo void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
478d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
479d8a5713eSQu Wenruo {
480d8a5713eSQu Wenruo 	bool last;
481d8a5713eSQu Wenruo 
482d8a5713eSQu Wenruo 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
483d8a5713eSQu Wenruo 	if (last)
484d8a5713eSQu Wenruo 		clear_page_dirty_for_io(page);
485d8a5713eSQu Wenruo }
486d8a5713eSQu Wenruo 
4873470da3bSQu Wenruo void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
4883470da3bSQu Wenruo 		struct page *page, u64 start, u32 len)
4893470da3bSQu Wenruo {
4903470da3bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
49172a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
49272a69cd0SQu Wenruo 							writeback, start, len);
4933470da3bSQu Wenruo 	unsigned long flags;
4943470da3bSQu Wenruo 
4953470da3bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
49672a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
4973470da3bSQu Wenruo 	set_page_writeback(page);
4983470da3bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
4993470da3bSQu Wenruo }
5003470da3bSQu Wenruo 
5013470da3bSQu Wenruo void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
5023470da3bSQu Wenruo 		struct page *page, u64 start, u32 len)
5033470da3bSQu Wenruo {
5043470da3bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
50572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
50672a69cd0SQu Wenruo 							writeback, start, len);
5073470da3bSQu Wenruo 	unsigned long flags;
5083470da3bSQu Wenruo 
5093470da3bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
51072a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
51172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
5127c11d0aeSQu Wenruo 		ASSERT(PageWriteback(page));
5133470da3bSQu Wenruo 		end_page_writeback(page);
5147c11d0aeSQu Wenruo 	}
5153470da3bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5163470da3bSQu Wenruo }
5173470da3bSQu Wenruo 
5186f17400bSQu Wenruo void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
5196f17400bSQu Wenruo 		struct page *page, u64 start, u32 len)
5206f17400bSQu Wenruo {
5216f17400bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
52272a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
52372a69cd0SQu Wenruo 							ordered, start, len);
5246f17400bSQu Wenruo 	unsigned long flags;
5256f17400bSQu Wenruo 
5266f17400bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
52772a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
5286f17400bSQu Wenruo 	SetPageOrdered(page);
5296f17400bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5306f17400bSQu Wenruo }
5316f17400bSQu Wenruo 
5326f17400bSQu Wenruo void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
5336f17400bSQu Wenruo 		struct page *page, u64 start, u32 len)
5346f17400bSQu Wenruo {
5356f17400bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
53672a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
53772a69cd0SQu Wenruo 							ordered, start, len);
5386f17400bSQu Wenruo 	unsigned long flags;
5396f17400bSQu Wenruo 
5406f17400bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
54172a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
54272a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
5436f17400bSQu Wenruo 		ClearPageOrdered(page);
5446f17400bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5456f17400bSQu Wenruo }
546e4f94347SQu Wenruo 
547e4f94347SQu Wenruo void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
548e4f94347SQu Wenruo 			       struct page *page, u64 start, u32 len)
549e4f94347SQu Wenruo {
550e4f94347SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
551e4f94347SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
552e4f94347SQu Wenruo 							checked, start, len);
553e4f94347SQu Wenruo 	unsigned long flags;
554e4f94347SQu Wenruo 
555e4f94347SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
556e4f94347SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
557e4f94347SQu Wenruo 	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
558e4f94347SQu Wenruo 		SetPageChecked(page);
559e4f94347SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
560e4f94347SQu Wenruo }
561e4f94347SQu Wenruo 
562e4f94347SQu Wenruo void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
563e4f94347SQu Wenruo 				 struct page *page, u64 start, u32 len)
564e4f94347SQu Wenruo {
565e4f94347SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
566e4f94347SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
567e4f94347SQu Wenruo 							checked, start, len);
568e4f94347SQu Wenruo 	unsigned long flags;
569e4f94347SQu Wenruo 
570e4f94347SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
571e4f94347SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
572e4f94347SQu Wenruo 	ClearPageChecked(page);
573e4f94347SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
574e4f94347SQu Wenruo }
575e4f94347SQu Wenruo 
576a1d767c1SQu Wenruo /*
577a1d767c1SQu Wenruo  * Unlike set/clear, which depends on each page flag's status, for test all
578a1d767c1SQu Wenruo  * bits are tested in the same way.
579a1d767c1SQu Wenruo  */
580a1d767c1SQu Wenruo #define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
581a1d767c1SQu Wenruo bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
582a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
583a1d767c1SQu Wenruo {									\
584a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
58572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
58672a69cd0SQu Wenruo 						name, start, len);	\
587a1d767c1SQu Wenruo 	unsigned long flags;						\
588a1d767c1SQu Wenruo 	bool ret;							\
589a1d767c1SQu Wenruo 									\
590a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);			\
59172a69cd0SQu Wenruo 	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
59272a69cd0SQu Wenruo 				len >> fs_info->sectorsize_bits);	\
593a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);			\
594a1d767c1SQu Wenruo 	return ret;							\
595a1d767c1SQu Wenruo }
596a1d767c1SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
59703a816b3SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
598d8a5713eSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
5993470da3bSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
6006f17400bSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
601e4f94347SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
602a1d767c1SQu Wenruo 
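/*
 * Each instantiation above generates one helper.  The dirty case, for
 * example, expands to roughly:
 *
 *	bool btrfs_subpage_test_dirty(const struct btrfs_fs_info *fs_info,
 *				      struct page *page, u64 start, u32 len);
 *
 * which returns true only if every sector in [start, start + len) has its
 * dirty bit set in the subpage bitmap.
 */
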
603a1d767c1SQu Wenruo /*
604a1d767c1SQu Wenruo  * Note that, in selftests (extent-io-tests), we can have an empty fs_info
605a1d767c1SQu Wenruo  * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can
606a1d767c1SQu Wenruo  * fall back to the regular sectorsize branch.
607a1d767c1SQu Wenruo  */
608a1d767c1SQu Wenruo #define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
609a1d767c1SQu Wenruo 			       test_page_func)				\
610a1d767c1SQu Wenruo void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
611a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
612a1d767c1SQu Wenruo {									\
613a1d767c1SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
614a1d767c1SQu Wenruo 		set_page_func(page);					\
615a1d767c1SQu Wenruo 		return;							\
616a1d767c1SQu Wenruo 	}								\
617a1d767c1SQu Wenruo 	btrfs_subpage_set_##name(fs_info, page, start, len);		\
618a1d767c1SQu Wenruo }									\
619a1d767c1SQu Wenruo void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
620a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
621a1d767c1SQu Wenruo {									\
622a1d767c1SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
623a1d767c1SQu Wenruo 		clear_page_func(page);					\
624a1d767c1SQu Wenruo 		return;							\
625a1d767c1SQu Wenruo 	}								\
626a1d767c1SQu Wenruo 	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
627a1d767c1SQu Wenruo }									\
628a1d767c1SQu Wenruo bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
629a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
630a1d767c1SQu Wenruo {									\
631a1d767c1SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
632a1d767c1SQu Wenruo 		return test_page_func(page);				\
633a1d767c1SQu Wenruo 	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
63460e2d255SQu Wenruo }									\
63560e2d255SQu Wenruo void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
63660e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
63760e2d255SQu Wenruo {									\
63860e2d255SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
63960e2d255SQu Wenruo 		set_page_func(page);					\
64060e2d255SQu Wenruo 		return;							\
64160e2d255SQu Wenruo 	}								\
64260e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
64360e2d255SQu Wenruo 	btrfs_subpage_set_##name(fs_info, page, start, len);		\
64460e2d255SQu Wenruo }									\
64560e2d255SQu Wenruo void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
64660e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
64760e2d255SQu Wenruo {									\
64860e2d255SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
64960e2d255SQu Wenruo 		clear_page_func(page);					\
65060e2d255SQu Wenruo 		return;							\
65160e2d255SQu Wenruo 	}								\
65260e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
65360e2d255SQu Wenruo 	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
65460e2d255SQu Wenruo }									\
65560e2d255SQu Wenruo bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
65660e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
65760e2d255SQu Wenruo {									\
65860e2d255SQu Wenruo 	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
65960e2d255SQu Wenruo 		return test_page_func(page);				\
66060e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
66160e2d255SQu Wenruo 	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
662a1d767c1SQu Wenruo }
663a1d767c1SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
664a1d767c1SQu Wenruo 			 PageUptodate);
66503a816b3SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
666d8a5713eSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
667d8a5713eSQu Wenruo 			 PageDirty);
6683470da3bSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
6693470da3bSQu Wenruo 			 PageWriteback);
6706f17400bSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
6716f17400bSQu Wenruo 			 PageOrdered);
672e4f94347SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
673cc1d0d93SQu Wenruo 
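/*
 * A caller that can see both regular and subpage filesystems can therefore
 * use a single call for either case, e.g. (illustrative):
 *
 *	btrfs_page_clamp_set_dirty(fs_info, page, start, len);
 *
 * With sectorsize == PAGE_SIZE this is just set_page_dirty(page); with
 * subpage the range is first clamped to the page, the covered sectors are
 * marked dirty in the bitmap, and the page dirty flag is set as well.
 */
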
674cc1d0d93SQu Wenruo /*
675cc1d0d93SQu Wenruo  * Make sure not only the page dirty bit is cleared, but also the subpage
676cc1d0d93SQu Wenruo  * dirty bit is cleared.
677cc1d0d93SQu Wenruo  */
678cc1d0d93SQu Wenruo void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
679cc1d0d93SQu Wenruo 				 struct page *page)
680cc1d0d93SQu Wenruo {
681cc1d0d93SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
682cc1d0d93SQu Wenruo 
683cc1d0d93SQu Wenruo 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
684cc1d0d93SQu Wenruo 		return;
685cc1d0d93SQu Wenruo 
686cc1d0d93SQu Wenruo 	ASSERT(!PageDirty(page));
687cc1d0d93SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
688cc1d0d93SQu Wenruo 		return;
689cc1d0d93SQu Wenruo 
690cc1d0d93SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
69172a69cd0SQu Wenruo 	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
692cc1d0d93SQu Wenruo }
693*e55a0de1SQu Wenruo 
694*e55a0de1SQu Wenruo /*
695*e55a0de1SQu Wenruo  * Handle different locked pages with different page sizes:
696*e55a0de1SQu Wenruo  *
697*e55a0de1SQu Wenruo  * - Page locked by plain lock_page()
698*e55a0de1SQu Wenruo  *   It should not have any subpage::writers count.
699*e55a0de1SQu Wenruo  *   Can be unlocked by unlock_page().
700*e55a0de1SQu Wenruo  *   This is the most common locked page for __extent_writepage() called
701*e55a0de1SQu Wenruo  *   inside extent_write_cache_pages() or extent_write_full_page().
702*e55a0de1SQu Wenruo  *   Rarer cases include the @locked_page from extent_write_locked_range().
703*e55a0de1SQu Wenruo  *
704*e55a0de1SQu Wenruo  * - Page locked by lock_delalloc_pages()
705*e55a0de1SQu Wenruo  *   There is only one caller, all pages except @locked_page for
706*e55a0de1SQu Wenruo  *   extent_write_locked_range().
707*e55a0de1SQu Wenruo  *   In this case, we have to call the subpage helper to handle it.
708*e55a0de1SQu Wenruo  */
709*e55a0de1SQu Wenruo void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
710*e55a0de1SQu Wenruo 			      u64 start, u32 len)
711*e55a0de1SQu Wenruo {
712*e55a0de1SQu Wenruo 	struct btrfs_subpage *subpage;
713*e55a0de1SQu Wenruo 
714*e55a0de1SQu Wenruo 	ASSERT(PageLocked(page));
715*e55a0de1SQu Wenruo 	/* For regular page size case, we just unlock the page */
716*e55a0de1SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
717*e55a0de1SQu Wenruo 		return unlock_page(page);
718*e55a0de1SQu Wenruo 
719*e55a0de1SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
720*e55a0de1SQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
721*e55a0de1SQu Wenruo 
722*e55a0de1SQu Wenruo 	/*
723*e55a0de1SQu Wenruo 	 * For the subpage case, there are two types of locked pages: with or
724*e55a0de1SQu Wenruo 	 * without a writers count.
725*e55a0de1SQu Wenruo 	 *
726*e55a0de1SQu Wenruo 	 * Since we own the page lock, no one else could touch subpage::writers
727*e55a0de1SQu Wenruo 	 * and we are safe to do several atomic operations without spinlock.
728*e55a0de1SQu Wenruo 	 */
729*e55a0de1SQu Wenruo 	if (atomic_read(&subpage->writers) == 0)
730*e55a0de1SQu Wenruo 		/* No writers, locked by plain lock_page() */
731*e55a0de1SQu Wenruo 		return unlock_page(page);
732*e55a0de1SQu Wenruo 
733*e55a0de1SQu Wenruo 	/* Have writers, use proper subpage helper to end it */
734*e55a0de1SQu Wenruo 	btrfs_page_end_writer_lock(fs_info, page, start, len);
735*e55a0de1SQu Wenruo }
736