// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


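/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The GFS2 inode
 * @page: The (locked) page
 * @from: The starting byte offset within the page
 * @len: The length of the range in bytes
 *
 * Marks each buffer head that overlaps the byte range [@from, @from + @len)
 * as uptodate and adds it to the current transaction with
 * gfs2_trans_add_data().
 */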
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything that
 *    is slightly inconvenient (such as locking conflicts between the
 *    page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (gfs2_glock_nq(&gh))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		mpage_readahead(rac, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

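/**
 * gfs2_discard - Detach a buffer from the journal prior to invalidation
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Clears the buffer's dirty state, unlinks it from the journal (either by
 * removing it from its list directly or via gfs2_remove_from_journal()),
 * and clears its mapped/req/new state so the block is forgotten.
 */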
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

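/**
 * gfs2_invalidatepage - Invalidate part or all of a page
 * @page: The page being invalidated
 * @offset: The start of the byte range being invalidated
 * @length: The length of the byte range
 *
 * Discards any buffers that lie entirely within the invalidated range.
 * For a whole-page invalidation, the PageChecked flag is cleared and
 * try_to_release_page() is called to drop the remaining buffers.
 */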
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared.  Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

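/*
 * Address space operations for "writeback" and "ordered" data modes.
 * Journaled data (jdata) inodes use gfs2_jdata_aops below; the choice
 * is made in gfs2_set_aops().
 */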
static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}