// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   every valid nodesize fits inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block only triggers the read for the
 *   needed range; other unrelated ranges in the same page are not touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to write back tree block A, we will also write back
 *   tree block B if it is dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (e.g. holding one tree lock while trying to lock another tree
 *   block in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->error_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}
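
/*
 * Worked example (editor's illustration, not from the original file): with a
 * 64K page and 4K sectorsize, nr_bits = 64K / 4K = 16, so the five bitmaps
 * are packed back to back inside one bitmap array:
 *
 *   uptodate_offset  =  0
 *   error_offset     = 16
 *   dirty_offset     = 32
 *   writeback_offset = 48
 *   ordered_offset   = 64
 *   total_nr_bits    = 80
 */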

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));

	/* Either not subpage, or the page already has private attached */
	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	attach_page_private(page, subpage);
	return 0;
}
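
/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): the expected attach/detach lifecycle for a data page.  For mapped
 * pages btrfs_attach_subpage() asserts that the page is already locked, so a
 * caller would roughly do:
 */
static int __maybe_unused demo_subpage_lifecycle(const struct btrfs_fs_info *fs_info,
						 struct page *page)
{
	int ret;

	lock_page(page);
	ret = btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
	if (ret < 0) {
		unlock_page(page);
		return ret;
	}
	/* ... operate on the page through the btrfs_subpage_*() helpers ... */
	btrfs_detach_subpage(fs_info, page);
	unlock_page(page);
	return 0;
}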

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or already detached */
	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
		return;

	subpage = (struct btrfs_subpage *)detach_page_private(page);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}
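
/*
 * Worked example (editor's illustration): continuing the 4K sectorsize /
 * 64K page case above, total_nr_bits = 80, so BITS_TO_LONGS(80) = 2 longs
 * on a 64-bit machine, and real_size = sizeof(struct btrfs_subpage) +
 * 2 * sizeof(unsigned long).
 */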

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the last eb
 * freeing of the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're
 * still allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}

void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
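
/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): both eb_refs helpers expect mapping->private_lock to be held, and
 * every increment must eventually be balanced by a decrement:
 */
static void __maybe_unused demo_eb_refs_usage(const struct btrfs_fs_info *fs_info,
					      struct page *page)
{
	spin_lock(&page->mapping->private_lock);
	btrfs_page_inc_eb_refs(fs_info, page);
	/* ... e.g. insert the new extent buffer into the radix tree ... */
	btrfs_page_dec_eb_refs(fs_info, page);
	spin_unlock(&page->mapping->private_lock);
}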

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	atomic_add(nbits, &subpage->readers);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, page, start, len);
	is_data = is_data_inode(page->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * Please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we want atomic_sub_and_test() to
	 * always be executed.
	 */
	if (is_data && last)
		unlock_page(page);
}
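
/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): the reader count is taken before read IO is submitted and dropped
 * when the read completes; for data pages the last end_reader call also
 * unlocks the page.  Assumes a mapped page with a subpage attached:
 */
static void __maybe_unused demo_reader_usage(const struct btrfs_fs_info *fs_info,
					     struct page *page, u64 start, u32 len)
{
	btrfs_subpage_start_reader(fs_info, page, start, len);
	/* ... submit the read for [start, start + len) ... */
	/* Later, from the read endio path: */
	btrfs_subpage_end_reader(fs_info, page, start, len);
}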

static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, page_offset(page), orig_start);
	*len = min_t(u64, page_offset(page) + PAGE_SIZE,
		     orig_start + orig_len) - *start;
}
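
/*
 * Worked example (editor's illustration): for a page at file offset 64K,
 * a range of *start = 60K, *len = 8K is clamped to *start = 64K, *len = 4K,
 * i.e. only the part of the range overlapping this page is kept.
 */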

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, page, start, len);

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * find_get_pages_contig(), which can race with page invalidation.
 */
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
		lock_page(page);
		return 0;
	}
	lock_page(page);
	if (!PagePrivate(page) || !page->private) {
		unlock_page(page);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(page, &start, &len);
	btrfs_subpage_start_writer(fs_info, page, start, len);
	return 0;
}
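
/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): how the writer lock pairs with btrfs_page_end_writer_lock():
 */
static int __maybe_unused demo_writer_lock_usage(const struct btrfs_fs_info *fs_info,
						 struct page *page, u64 start, u32 len)
{
	int ret;

	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
	if (ret < 0)
		return ret;	/* -EAGAIN: page got invalidated, caller retries */

	/* ... prepare the range for delalloc writeback ... */

	/* Unlocks the page once its last writer is finished */
	btrfs_page_end_writer_lock(fs_info, page, start, len);
	return 0;
}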

void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
		return unlock_page(page);
	btrfs_subpage_clamp_range(page, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
		unlock_page(page);
}

static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
				      unsigned int nbits)
{
	unsigned int found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	if (found_zero == start + nbits)
		return true;
	return false;
}

static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
				       unsigned int nbits)
{
	unsigned int found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	if (found_set == start + nbits)
		return true;
	return false;
}

#define subpage_calc_start_bit(fs_info, page, name, start, len)		\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, page, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})
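
/*
 * Worked example (editor's illustration, 4K sectorsize): for the dirty
 * bitmap (dirty_offset = 32 in the layout above) and a range starting 16K
 * into the page, start_bit = (16K >> 12) + 32 = 36.
 */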

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
		ClearPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	set_page_dirty(page);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if the cleared bits were the last dirty bits in the bitmap,
 * i.e. the whole dirty bitmap is now zero.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(PageWriteback(page));
		end_page_writeback(page);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		ClearPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
 * Unlike set/clear, which are dependent on each page's status, all test
 * operations check the bits in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
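
/*
 * For reference (editor's illustration): each instantiation above expands to
 * e.g. btrfs_subpage_test_uptodate(fs_info, page, start, len), which takes
 * subpage->lock, checks that every bit of the uptodate bitmap covering
 * [start, start + len) is set, and returns the result.
 */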

/*
 * Note that in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we
 * can fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
			       test_page_func)				\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}									\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
			 PageOrdered);
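
/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): the clamp variants generated above accept ranges that may cross
 * the page boundary, so callers don't have to trim them first:
 */
static void __maybe_unused demo_clamp_ops_usage(const struct btrfs_fs_info *fs_info,
						struct page *page, u64 start, u32 len)
{
	/* Handles both regular and subpage cases; range clamped to this page */
	btrfs_page_clamp_set_dirty(fs_info, page, start, len);
	if (btrfs_page_clamp_test_dirty(fs_info, page, start, len))
		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
}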

/*
 * Make sure that not only the page dirty bit is cleared, but the subpage
 * dirty bits are cleared as well.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				 struct page *page)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!PageDirty(page));
	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->private);
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}