// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

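/**
 * gfs2_trans_add_databufs - add a folio's buffers to the running transaction
 * @ip: The GFS2 inode
 * @folio: The folio
 * @from: The first byte offset within the folio
 * @len: The number of bytes, starting at @from
 *
 * Each buffer head overlapping the byte range [@from, @from + @len) is
 * marked uptodate and added to the current transaction as journaled data.
 */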
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     unsigned int from, unsigned int len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}
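
/*
 * Worked example for the zeroing above (illustrative values only): with
 * PAGE_SIZE == 4096 and i_size == 10000, end_index == 2 and offset == 1808,
 * so when the page at index 2 is written back, bytes 1808..4095 are zeroed
 * first.  Pages below end_index are written out in full.
 */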

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, page_folio(page), 0, PAGE_SIZE);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
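
/*
 * Note on the out_ignore path above: when the page still needs journaling
 * (PageChecked) or a transaction is already open (current->journal_info),
 * the transaction that __gfs2_jdata_writepage() would require cannot safely
 * be started here, so the page is simply redirtied and left for a later
 * writepages pass, which starts the transaction first.
 */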

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}
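
/*
 * Transaction sizing for the jdata writeback path below: each page covers
 * PAGE_SIZE >> inode->i_blkbits filesystem blocks, so a pagevec of nr_pages
 * pages reserves nr_pages times that many blocks in the journal (with
 * 4096-byte pages and 4096-byte blocks, exactly one block per page).
 */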

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
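
/*
 * Like write_cache_pages(), the loop below tags all currently dirty pages
 * as PAGECACHE_TAG_TOWRITE up front when doing an integrity sync, so that
 * pages dirtied while the sync runs cannot keep the scan going
 * indefinitely.
 */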

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
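
/*
 * For WB_SYNC_ALL, gfs2_jdata_writepages() below makes a second writeback
 * pass after flushing the log: journaled pages can only be written back in
 * place once the log flush has unpinned their buffers.
 */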

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
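
/*
 * A "stuffed" file is one small enough for its data to be held in the
 * inode's own disk block, directly after the struct gfs2_dinode header, so
 * it has no separate data blocks to map.
 */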

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}
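
/*
 * Typical use of gfs2_internal_read() (sketch): reading a whole internal
 * file, such as the rindex, into a caller-supplied buffer:
 *
 *	loff_t pos = 0;
 *	int error = gfs2_internal_read(ip, buf, &pos, size);
 *
 * On success the full size is returned and *pos is advanced by size; on
 * failure a negative errno is returned.
 */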

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}
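
/**
 * jdata_dirty_folio - mark a folio dirty on a jdata mapping
 * @mapping: The address space
 * @folio: The folio being dirtied
 *
 * If the folio is dirtied while a transaction is open (current->journal_info
 * is set), it is marked checked so that __gfs2_jdata_writepage() will later
 * journal its buffers before writing it back.
 */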
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
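
/**
 * gfs2_discard - detach a buffer from the journal before it is invalidated
 * @sdp: The superblock
 * @bh: The buffer head being discarded
 *
 * Clears the buffer's dirty state and unlinks its gfs2_bufdata from the log:
 * a queued but unpinned buffer is removed from its list directly, while
 * anything else is taken out of the journal via gfs2_remove_from_journal().
 */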
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}