xref: /openbmc/linux/fs/btrfs/subpage.c (revision a1d767c1)
1cac06d84SQu Wenruo // SPDX-License-Identifier: GPL-2.0
2cac06d84SQu Wenruo 
3cac06d84SQu Wenruo #include <linux/slab.h>
4cac06d84SQu Wenruo #include "ctree.h"
5cac06d84SQu Wenruo #include "subpage.h"
6cac06d84SQu Wenruo 
7cac06d84SQu Wenruo int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
8cac06d84SQu Wenruo 			 struct page *page, enum btrfs_subpage_type type)
9cac06d84SQu Wenruo {
10760f991fSQu Wenruo 	struct btrfs_subpage *subpage = NULL;
11760f991fSQu Wenruo 	int ret;
12cac06d84SQu Wenruo 
13cac06d84SQu Wenruo 	/*
14cac06d84SQu Wenruo 	 * We have cases like a dummy extent buffer page, which is not mappped
15cac06d84SQu Wenruo 	 * and doesn't need to be locked.
16cac06d84SQu Wenruo 	 */
17cac06d84SQu Wenruo 	if (page->mapping)
18cac06d84SQu Wenruo 		ASSERT(PageLocked(page));
19cac06d84SQu Wenruo 	/* Either not subpage, or the page already has private attached */
20cac06d84SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
21cac06d84SQu Wenruo 		return 0;
22cac06d84SQu Wenruo 
23760f991fSQu Wenruo 	ret = btrfs_alloc_subpage(fs_info, &subpage, type);
24760f991fSQu Wenruo 	if (ret < 0)
25760f991fSQu Wenruo 		return ret;
26cac06d84SQu Wenruo 	attach_page_private(page, subpage);
27cac06d84SQu Wenruo 	return 0;
28cac06d84SQu Wenruo }
29cac06d84SQu Wenruo 
30cac06d84SQu Wenruo void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
31cac06d84SQu Wenruo 			  struct page *page)
32cac06d84SQu Wenruo {
33cac06d84SQu Wenruo 	struct btrfs_subpage *subpage;
34cac06d84SQu Wenruo 
35cac06d84SQu Wenruo 	/* Either not subpage, or already detached */
36cac06d84SQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
37cac06d84SQu Wenruo 		return;
38cac06d84SQu Wenruo 
39cac06d84SQu Wenruo 	subpage = (struct btrfs_subpage *)detach_page_private(page);
40cac06d84SQu Wenruo 	ASSERT(subpage);
41760f991fSQu Wenruo 	btrfs_free_subpage(subpage);
42760f991fSQu Wenruo }
43760f991fSQu Wenruo 
44760f991fSQu Wenruo int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
45760f991fSQu Wenruo 			struct btrfs_subpage **ret,
46760f991fSQu Wenruo 			enum btrfs_subpage_type type)
47760f991fSQu Wenruo {
48760f991fSQu Wenruo 	if (fs_info->sectorsize == PAGE_SIZE)
49760f991fSQu Wenruo 		return 0;
50760f991fSQu Wenruo 
51760f991fSQu Wenruo 	*ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
52760f991fSQu Wenruo 	if (!*ret)
53760f991fSQu Wenruo 		return -ENOMEM;
54760f991fSQu Wenruo 	spin_lock_init(&(*ret)->lock);
558ff8466dSQu Wenruo 	if (type == BTRFS_SUBPAGE_METADATA)
568ff8466dSQu Wenruo 		atomic_set(&(*ret)->eb_refs, 0);
57760f991fSQu Wenruo 	return 0;
58760f991fSQu Wenruo }
59760f991fSQu Wenruo 
/*
 * Free a btrfs_subpage structure allocated by btrfs_alloc_subpage().
 *
 * Accepts NULL (kfree() is a no-op for NULL pointers).
 */
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
648ff8466dSQu Wenruo 
/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * allocating the extent buffer.
 *
 * Caller must hold page->mapping->private_lock (enforced below via
 * lockdep_assert_held()).
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	/* eb_refs is only tracked for subpage sectorsize */
	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	/* A subpage structure must already be attached to the mapped page */
	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}
888ff8466dSQu Wenruo 
/*
 * Decrease the eb_refs of current subpage, the counterpart of
 * btrfs_page_inc_eb_refs().
 *
 * Caller must hold page->mapping->private_lock (enforced below via
 * lockdep_assert_held()).
 */
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	/* eb_refs is only tracked for subpage sectorsize */
	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	/* A subpage structure must already be attached to the mapped page */
	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	/* Decrementing below zero would indicate an inc/dec imbalance */
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
104*a1d767c1SQu Wenruo 
/*
 * Convert the [start, start + len) range into a u16 bitmap
 *
 * Each bit represents one sector-sized chunk of the page, bit 0 being the
 * first sector.
 *
 * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
 *
 * @start and @len must be sector-aligned, and for mapped pages the range
 * must fall entirely within the page.
 */
static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
	const int nbits = len >> fs_info->sectorsize_bits;

	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));

	/*
	 * The range check only works for mapped page, we can still have
	 * unmapped page like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
	/*
	 * Here nbits can be 16, thus can go beyond u16 range. We make the
	 * first left shift to be calculated in unsigned long (at least u32),
	 * then truncate the result to u16.
	 */
	return (u16)(((1UL << nbits) - 1) << bit_start);
}
135*a1d767c1SQu Wenruo 
136*a1d767c1SQu Wenruo void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
137*a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
138*a1d767c1SQu Wenruo {
139*a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
140*a1d767c1SQu Wenruo 	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
141*a1d767c1SQu Wenruo 	unsigned long flags;
142*a1d767c1SQu Wenruo 
143*a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
144*a1d767c1SQu Wenruo 	subpage->uptodate_bitmap |= tmp;
145*a1d767c1SQu Wenruo 	if (subpage->uptodate_bitmap == U16_MAX)
146*a1d767c1SQu Wenruo 		SetPageUptodate(page);
147*a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
148*a1d767c1SQu Wenruo }
149*a1d767c1SQu Wenruo 
150*a1d767c1SQu Wenruo void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
151*a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
152*a1d767c1SQu Wenruo {
153*a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
154*a1d767c1SQu Wenruo 	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
155*a1d767c1SQu Wenruo 	unsigned long flags;
156*a1d767c1SQu Wenruo 
157*a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
158*a1d767c1SQu Wenruo 	subpage->uptodate_bitmap &= ~tmp;
159*a1d767c1SQu Wenruo 	ClearPageUptodate(page);
160*a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
161*a1d767c1SQu Wenruo }
162*a1d767c1SQu Wenruo 
/*
 * Unlike set/clear which is dependent on each page status, for test all bits
 * are tested in the same way.
 *
 * Generates btrfs_subpage_test_##name(), which returns true only when ALL
 * sectors covered by [start, start + len) have their bit set in
 * subpage->name##_bitmap.  The bitmap is read under subpage->lock.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = ((subpage->name##_bitmap & tmp) == tmp);			\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
182*a1d767c1SQu Wenruo 
/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to regular sectorsize branch.
 *
 * Generates the btrfs_page_{set,clear,test}_##name() wrappers: for regular
 * sectorsize (or a NULL fs_info from selftests) they operate on the whole
 * page via the given page-flag helpers, otherwise they dispatch to the
 * per-sector btrfs_subpage_* variants.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
			       test_page_func)				\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
217