// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "ctree.h"
#include "subpage.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This makes metadata handling easier, as a 64K page ensures that any
 *   nodesize fits inside one page, thus we don't need to handle cases
 *   where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross the 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, thus only
 *   ancient filesystems could have such a problem.  In that case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   That means reading one tree block only triggers the read for the
 *   needed range; other unrelated ranges in the same page are not touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *	Page offset
 *	0         16K        32K        48K        64K
 *	|/////////|                     |//////////|
 *	 \- Tree block A                 \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the
 *   extra granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on
 *   page locking anymore, or we would have greatly reduced concurrency or
 *   even deadlocks (hold one tree lock while trying to lock another tree
 *   lock in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking
 *   only.  This means a slightly higher tree locking latency.
 */
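
/*
 * A minimal lifecycle sketch (illustrative only, not a real call site):
 * the subpage structure is attached while a page is under btrfs control
 * and detached on release.  For mapped pages the attach must happen with
 * the page locked (see the ASSERT below):
 *
 *	lock_page(page);
 *	ret = btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_METADATA);
 *	if (ret < 0)
 *		goto out_unlock;
 *	...
 *	btrfs_detach_subpage(fs_info, page);
 */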

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage = NULL;
	int ret;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));
	/* Either not subpage, or the page already has private attached */
	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
		return 0;

	ret = btrfs_alloc_subpage(fs_info, &subpage, type);
	if (ret < 0)
		return ret;
	attach_page_private(page, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or already detached */
	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
		return;

	subpage = (struct btrfs_subpage *)detach_page_private(page);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
			struct btrfs_subpage **ret,
			enum btrfs_subpage_type type)
{
	if (fs_info->sectorsize == PAGE_SIZE)
		return 0;

	*ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
	if (!*ret)
		return -ENOMEM;
	spin_lock_init(&(*ret)->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&(*ret)->eb_refs, 0);
	else
		atomic_set(&(*ret)->readers, 0);
	return 0;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
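
/*
 * For reference, a sketch of the struct btrfs_subpage fields as used in
 * this file (the authoritative definition lives in subpage.h):
 *
 *	spinlock_t lock;		protects all the bitmaps below
 *	u16 uptodate_bitmap;		one bit per sector in the page
 *	u16 error_bitmap;
 *	u16 dirty_bitmap;
 *	u16 writeback_bitmap;
 *	atomic_t eb_refs;		metadata: live extent buffers
 *	atomic_t readers;		data: in-flight read sectors
 *
 * Only one of eb_refs/readers is initialized by btrfs_alloc_subpage(),
 * depending on the subpage type.
 */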

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're
 * still allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}

void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
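
/*
 * Illustrative caller pattern for the eb_refs helpers (simplified; the
 * real call sites live in the extent buffer code).  Both helpers assert
 * that mapping->private_lock is held, so the expected shape is:
 *
 *	spin_lock(&page->mapping->private_lock);
 *	btrfs_page_inc_eb_refs(fs_info, page);
 *	spin_unlock(&page->mapping->private_lock);
 *	... make the new extent buffer visible in the radix tree ...
 *
 * with a matching btrfs_page_dec_eb_refs() under the same lock when the
 * extent buffer is torn down.
 */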

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages, we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	ret = atomic_add_return(nbits, &subpage->readers);
	ASSERT(ret == nbits);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	if (atomic_sub_and_test(nbits, &subpage->readers))
		unlock_page(page);
}
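
/*
 * A worked example of the reader accounting (assuming a 4K sectorsize,
 * i.e. sectorsize_bits == 12): btrfs_subpage_start_reader() on a 16K
 * range adds 16K >> 12 == 4 to ->readers, and the matching end calls
 * subtract the same amounts as their ranges complete.  Only when the
 * counter drops back to zero is the page unlocked.
 */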

/*
 * Convert the [start, start + len) range into a u16 bitmap
 *
 * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
 */
static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
				     struct page *page, u64 start, u32 len)
{
	const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * Here nbits can be 16, thus can go beyond the u16 range.  Do the
	 * first left shift in unsigned long (at least u32), then truncate
	 * the result to u16.
	 */
	return (u16)(((1UL << nbits) - 1) << bit_start);
}
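
/*
 * Working through the example above (assuming a 4K sectorsize, so 16
 * sectors per 64K page): start at page offset 16K gives bit_start ==
 * 16K >> 12 == 4, and len == 16K gives nbits == 4.  Then:
 *
 *	((1UL << 4) - 1) << 4 == 0xf << 4 == 0x00f0
 *
 * i.e. bits 4-7 set, one bit per 4K sector of the range.
 */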

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->uptodate_bitmap |= tmp;
	if (subpage->uptodate_bitmap == U16_MAX)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->uptodate_bitmap &= ~tmp;
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->error_bitmap |= tmp;
	SetPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->error_bitmap &= ~tmp;
	if (subpage->error_bitmap == 0)
		ClearPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap |= tmp;
	spin_unlock_irqrestore(&subpage->lock, flags);
	set_page_dirty(page);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last bits of the dirty_bitmap, i.e. the
 * bitmap is now empty.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag in the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap &= ~tmp;
	if (subpage->dirty_bitmap == 0)
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->writeback_bitmap |= tmp;
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->writeback_bitmap &= ~tmp;
	if (subpage->writeback_bitmap == 0)
		end_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
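
/*
 * Note how the subpage bitmaps drive the page flags above: uptodate is
 * the strict one, with PageUptodate set only once every sector is
 * uptodate and cleared as soon as any sector loses the state.  Error,
 * dirty and writeback are the opposite: the page flag is set as soon as
 * any sector has the state, and cleared only once the whole bitmap is
 * empty again.
 */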

/*
 * Unlike set/clear, which depend on each page status, all bits are
 * tested the same way for the test operations.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct page *page, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = ((subpage->name##_bitmap & tmp) == tmp);			\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);

/*
 * Note that in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we
 * can fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
				 test_page_func)			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
			   struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
			     struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
			    struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
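
/*
 * For reference, IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty) above expands to
 * (whitespace aside):
 *
 *	bool btrfs_subpage_test_dirty(const struct btrfs_fs_info *fs_info,
 *				      struct page *page, u64 start, u32 len)
 *	{
 *		...
 *		ret = ((subpage->dirty_bitmap & tmp) == tmp);
 *		...
 *	}
 *
 * so a range tests true only if *every* sector in it has the bit set.
 */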