xref: /openbmc/linux/fs/btrfs/subpage.c (revision 3470da3b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/slab.h>
4 #include "ctree.h"
5 #include "subpage.h"
6 
/*
 * Attach a btrfs_subpage structure to page::private.
 *
 * No-op (returns 0) for regular sectorsize (== PAGE_SIZE) or when the page
 * already has private data attached.
 *
 * Return: 0 on success, or the negative errno from btrfs_alloc_subpage()
 * (-ENOMEM) on allocation failure.
 */
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage = NULL;
	int ret;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));
	/* Either not subpage, or the page already has private attached */
	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
		return 0;

	ret = btrfs_alloc_subpage(fs_info, &subpage, type);
	if (ret < 0)
		return ret;
	attach_page_private(page, subpage);
	return 0;
}
29 
30 void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
31 			  struct page *page)
32 {
33 	struct btrfs_subpage *subpage;
34 
35 	/* Either not subpage, or already detached */
36 	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
37 		return;
38 
39 	subpage = (struct btrfs_subpage *)detach_page_private(page);
40 	ASSERT(subpage);
41 	btrfs_free_subpage(subpage);
42 }
43 
44 int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
45 			struct btrfs_subpage **ret,
46 			enum btrfs_subpage_type type)
47 {
48 	if (fs_info->sectorsize == PAGE_SIZE)
49 		return 0;
50 
51 	*ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
52 	if (!*ret)
53 		return -ENOMEM;
54 	spin_lock_init(&(*ret)->lock);
55 	if (type == BTRFS_SUBPAGE_METADATA)
56 		atomic_set(&(*ret)->eb_refs, 0);
57 	else
58 		atomic_set(&(*ret)->readers, 0);
59 	return 0;
60 }
61 
/* Free a structure allocated by btrfs_alloc_subpage(); NULL is a no-op */
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
66 
67 /*
68  * Increase the eb_refs of current subpage.
69  *
70  * This is important for eb allocation, to prevent race with last eb freeing
71  * of the same page.
72  * With the eb_refs increased before the eb inserted into radix tree,
73  * detach_extent_buffer_page() won't detach the page private while we're still
74  * allocating the extent buffer.
75  */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Regular sectorsize has no subpage structure, nothing to count */
	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	/* Caller must hold mapping->private_lock to keep page private stable */
	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}
90 
/* Counterpart of btrfs_page_inc_eb_refs(), drops one eb reference */
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Regular sectorsize has no subpage structure, nothing to count */
	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	/* Caller must hold mapping->private_lock to keep page private stable */
	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	/* Catch underflow: there must be a reference left to drop */
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
106 
/*
 * Sanity checks shared by every subpage range operation: the page has
 * private data attached, and [start, start + len) is sector aligned and
 * (for mapped pages) fully contained within the page.
 */
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped page, we can still have
	 * unmapped page like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}
122 
123 void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
124 		struct page *page, u64 start, u32 len)
125 {
126 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
127 	const int nbits = len >> fs_info->sectorsize_bits;
128 	int ret;
129 
130 	btrfs_subpage_assert(fs_info, page, start, len);
131 
132 	ret = atomic_add_return(nbits, &subpage->readers);
133 	ASSERT(ret == nbits);
134 }
135 
/*
 * Drop @len worth of reader sectors; the last reader to finish unlocks
 * the page.
 */
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);
	/* Catch underflow before subtracting */
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	/* atomic_sub_and_test() returns true when the count hits zero */
	if (atomic_sub_and_test(nbits, &subpage->readers))
		unlock_page(page);
}
147 
148 /*
149  * Convert the [start, start + len) range into a u16 bitmap
150  *
151  * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
152  */
153 static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
154 		struct page *page, u64 start, u32 len)
155 {
156 	const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
157 	const int nbits = len >> fs_info->sectorsize_bits;
158 
159 	btrfs_subpage_assert(fs_info, page, start, len);
160 
161 	/*
162 	 * Here nbits can be 16, thus can go beyond u16 range. We make the
163 	 * first left shift to be calculate in unsigned long (at least u32),
164 	 * then truncate the result to u16.
165 	 */
166 	return (u16)(((1UL << nbits) - 1) << bit_start);
167 }
168 
169 void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
170 		struct page *page, u64 start, u32 len)
171 {
172 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
173 	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
174 	unsigned long flags;
175 
176 	spin_lock_irqsave(&subpage->lock, flags);
177 	subpage->uptodate_bitmap |= tmp;
178 	if (subpage->uptodate_bitmap == U16_MAX)
179 		SetPageUptodate(page);
180 	spin_unlock_irqrestore(&subpage->lock, flags);
181 }
182 
183 void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
184 		struct page *page, u64 start, u32 len)
185 {
186 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
187 	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
188 	unsigned long flags;
189 
190 	spin_lock_irqsave(&subpage->lock, flags);
191 	subpage->uptodate_bitmap &= ~tmp;
192 	ClearPageUptodate(page);
193 	spin_unlock_irqrestore(&subpage->lock, flags);
194 }
195 
196 void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
197 		struct page *page, u64 start, u32 len)
198 {
199 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
200 	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
201 	unsigned long flags;
202 
203 	spin_lock_irqsave(&subpage->lock, flags);
204 	subpage->error_bitmap |= tmp;
205 	SetPageError(page);
206 	spin_unlock_irqrestore(&subpage->lock, flags);
207 }
208 
209 void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
210 		struct page *page, u64 start, u32 len)
211 {
212 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
213 	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
214 	unsigned long flags;
215 
216 	spin_lock_irqsave(&subpage->lock, flags);
217 	subpage->error_bitmap &= ~tmp;
218 	if (subpage->error_bitmap == 0)
219 		ClearPageError(page);
220 	spin_unlock_irqrestore(&subpage->lock, flags);
221 }
222 
/* Mark the sectors in [start, start + len) dirty and dirty the page */
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap |= tmp;
	spin_unlock_irqrestore(&subpage->lock, flags);
	/*
	 * NOTE(review): set_page_dirty() is deliberately called after
	 * dropping the irq-safe spinlock — presumably because it may take
	 * other locks; confirm before changing the ordering.
	 */
	set_page_dirty(page);
}
235 
236 /*
237  * Extra clear_and_test function for subpage dirty bitmap.
238  *
239  * Return true if we're the last bits in the dirty_bitmap and clear the
240  * dirty_bitmap.
241  * Return false otherwise.
242  *
243  * NOTE: Callers should manually clear page dirty for true case, as we have
244  * extra handling for tree blocks.
245  */
246 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
247 		struct page *page, u64 start, u32 len)
248 {
249 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
250 	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
251 	unsigned long flags;
252 	bool last = false;
253 
254 	spin_lock_irqsave(&subpage->lock, flags);
255 	subpage->dirty_bitmap &= ~tmp;
256 	if (subpage->dirty_bitmap == 0)
257 		last = true;
258 	spin_unlock_irqrestore(&subpage->lock, flags);
259 	return last;
260 }
261 
262 void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
263 		struct page *page, u64 start, u32 len)
264 {
265 	bool last;
266 
267 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
268 	if (last)
269 		clear_page_dirty_for_io(page);
270 }
271 
272 void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
273 		struct page *page, u64 start, u32 len)
274 {
275 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
276 	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
277 	unsigned long flags;
278 
279 	spin_lock_irqsave(&subpage->lock, flags);
280 	subpage->writeback_bitmap |= tmp;
281 	set_page_writeback(page);
282 	spin_unlock_irqrestore(&subpage->lock, flags);
283 }
284 
285 void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
286 		struct page *page, u64 start, u32 len)
287 {
288 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
289 	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
290 	unsigned long flags;
291 
292 	spin_lock_irqsave(&subpage->lock, flags);
293 	subpage->writeback_bitmap &= ~tmp;
294 	if (subpage->writeback_bitmap == 0)
295 		end_page_writeback(page);
296 	spin_unlock_irqrestore(&subpage->lock, flags);
297 }
298 
299 /*
300  * Unlike set/clear which is dependent on each page status, for test all bits
301  * are tested in the same way.
302  */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = ((subpage->name##_bitmap & tmp) == tmp);			\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
/*
 * Each generated helper returns true only when *every* sector in the
 * range has the corresponding bit set in the subpage bitmap.
 */
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
321 
322 /*
323  * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
324  * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
325  * back to regular sectorsize branch.
326  */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
			       test_page_func)				\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
/*
 * Generic entry points: fall back to the full-page helpers when subpage
 * handling is not involved (sectorsize == PAGE_SIZE, or NULL fs_info from
 * selftests), otherwise dispatch to the btrfs_subpage_* variants.
 */
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
361