1b1e71b06SSteven Whitehouse /* 2b1e71b06SSteven Whitehouse * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3b1e71b06SSteven Whitehouse * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4b1e71b06SSteven Whitehouse * 5b1e71b06SSteven Whitehouse * This copyrighted material is made available to anyone wishing to use, 6b1e71b06SSteven Whitehouse * modify, copy, or redistribute it subject to the terms and conditions 7b1e71b06SSteven Whitehouse * of the GNU General Public License version 2. 8b1e71b06SSteven Whitehouse */ 9b1e71b06SSteven Whitehouse 10b1e71b06SSteven Whitehouse #include <linux/sched.h> 11b1e71b06SSteven Whitehouse #include <linux/slab.h> 12b1e71b06SSteven Whitehouse #include <linux/spinlock.h> 13b1e71b06SSteven Whitehouse #include <linux/completion.h> 14b1e71b06SSteven Whitehouse #include <linux/buffer_head.h> 15b1e71b06SSteven Whitehouse #include <linux/pagemap.h> 16b1e71b06SSteven Whitehouse #include <linux/pagevec.h> 17b1e71b06SSteven Whitehouse #include <linux/mpage.h> 18b1e71b06SSteven Whitehouse #include <linux/fs.h> 19b1e71b06SSteven Whitehouse #include <linux/writeback.h> 20b1e71b06SSteven Whitehouse #include <linux/swap.h> 21b1e71b06SSteven Whitehouse #include <linux/gfs2_ondisk.h> 22b1e71b06SSteven Whitehouse #include <linux/backing-dev.h> 23e2e40f2cSChristoph Hellwig #include <linux/uio.h> 24774016b2SSteven Whitehouse #include <trace/events/writeback.h> 2564bc06bbSAndreas Gruenbacher #include <linux/sched/signal.h> 26b1e71b06SSteven Whitehouse 27b1e71b06SSteven Whitehouse #include "gfs2.h" 28b1e71b06SSteven Whitehouse #include "incore.h" 29b1e71b06SSteven Whitehouse #include "bmap.h" 30b1e71b06SSteven Whitehouse #include "glock.h" 31b1e71b06SSteven Whitehouse #include "inode.h" 32b1e71b06SSteven Whitehouse #include "log.h" 33b1e71b06SSteven Whitehouse #include "meta_io.h" 34b1e71b06SSteven Whitehouse #include "quota.h" 35b1e71b06SSteven Whitehouse #include "trans.h" 36b1e71b06SSteven Whitehouse #include "rgrp.h" 
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


/**
 * gfs2_page_add_databufs - add a page's buffers in a byte range to the
 *	current transaction as journaled data
 * @ip: The GFS2 inode
 * @page: The page whose buffers are walked (assumes buffers already
 *	exist — page_buffers() is called unconditionally)
 * @from: First byte offset within the page
 * @len: Number of bytes, starting at @from
 *
 * For every buffer head overlapping [@from, @from + @len), marks the
 * buffer uptodate and hands it to gfs2_trans_add_data().
 */
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	/* The buffer list is circular: stop when we wrap back to head. */
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Maps an existing block only (gfs2_block_map() is called with
 * create == 0); an unmapped result is treated as an I/O error.
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/* The glock must already be held exclusively; withdraw otherwise. */
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	/* Inside a transaction we cannot write back; redirty for later. */
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		/* Fully past EOF: drop the page instead of writing it. */
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	/* Page is (at least partly) within i_size: caller should write it. */
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Performs the common checks, then hands the page to nobh_writepage()
 * using the no-allocation block mapper.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		/* Transaction already started by the caller (see above). */
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	/*
	 * PageChecked means a transaction is required but none has been
	 * started here; journal_info means we are already inside one.
	 * Either way, redirty and let writepages handle it.
	 */
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * A single journal transaction covers the whole pagevec; it is begun
 * before any page lock is taken (see gfs2_write_cache_jdata()).
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	/*
	 * One reservation per block in the pagevec; NOTE(review): the
	 * division assumes block size <= page size — confirm.
	 */
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		/* Page was truncated or migrated from under us: skip. */
		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;		/* range_cyclic wrap-around already done? */
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * For integrity sync (WB_SYNC_ALL), a clean first pass is followed by a
 * log flush and a second pass, so journaled data reaches the media.
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Stuffed file data lives inline in the dinode block, immediately after
 * the struct gfs2_dinode header; the rest of the page is zero-filled.
 *
 * Returns: errno
 */

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	/* Clamp to the maximum that can actually be stuffed in the dinode. */
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

	int error;

	/* Bufferless full-size pages go through the iomap path. */
	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	/* Drop the page lock before taking the glock (lock ordering). */
	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	/* The page may have been truncated or read while unlocked. */
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	/* On a real error the caller expects the page locked again. */
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position (advanced by @size on success)
 * @size: The amount to read
 *
 * Returns: @size, or a negative errno from read_cache_page().
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		/*
		 * NOTE(review): the clamp tests 'offset + size' rather than
		 * 'offset + amt'; this appears to rely on offset being 0
		 * after the first page — verify.
		 */
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;	/* subsequent pages are read from the start */
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. Its
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host
*m_sc = &sdp->sd_statfs_master; 656b1e71b06SSteven Whitehouse struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 6571946f70aSBenjamin Marzinski struct buffer_head *m_bh, *l_bh; 658b1e71b06SSteven Whitehouse u64 fs_total, new_free; 659b1e71b06SSteven Whitehouse 660b1e71b06SSteven Whitehouse /* Total up the file system space, according to the latest rindex. */ 661b1e71b06SSteven Whitehouse fs_total = gfs2_ri_total(sdp); 6621946f70aSBenjamin Marzinski if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) 6631946f70aSBenjamin Marzinski return; 664b1e71b06SSteven Whitehouse 665b1e71b06SSteven Whitehouse spin_lock(&sdp->sd_statfs_spin); 6661946f70aSBenjamin Marzinski gfs2_statfs_change_in(m_sc, m_bh->b_data + 6671946f70aSBenjamin Marzinski sizeof(struct gfs2_dinode)); 668b1e71b06SSteven Whitehouse if (fs_total > (m_sc->sc_total + l_sc->sc_total)) 669b1e71b06SSteven Whitehouse new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); 670b1e71b06SSteven Whitehouse else 671b1e71b06SSteven Whitehouse new_free = 0; 672b1e71b06SSteven Whitehouse spin_unlock(&sdp->sd_statfs_spin); 673b1e71b06SSteven Whitehouse fs_warn(sdp, "File system extended by %llu blocks.\n", 674b1e71b06SSteven Whitehouse (unsigned long long)new_free); 675b1e71b06SSteven Whitehouse gfs2_statfs_change(sdp, new_free, new_free, 0); 6761946f70aSBenjamin Marzinski 6771946f70aSBenjamin Marzinski if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0) 6781946f70aSBenjamin Marzinski goto out; 6791946f70aSBenjamin Marzinski update_statfs(sdp, m_bh, l_bh); 6801946f70aSBenjamin Marzinski brelse(l_bh); 6811946f70aSBenjamin Marzinski out: 6821946f70aSBenjamin Marzinski brelse(m_bh); 683b1e71b06SSteven Whitehouse } 684b1e71b06SSteven Whitehouse 685b1e71b06SSteven Whitehouse /** 686b1e71b06SSteven Whitehouse * gfs2_stuffed_write_end - Write end for stuffed files 687b1e71b06SSteven Whitehouse * @inode: The inode 688b1e71b06SSteven Whitehouse * @dibh: The buffer_head containing the on-disk inode 689b1e71b06SSteven Whitehouse * 
@pos: The file position 690b1e71b06SSteven Whitehouse * @copied: How much was actually copied by the VFS 691b1e71b06SSteven Whitehouse * @page: The page 692b1e71b06SSteven Whitehouse * 693b1e71b06SSteven Whitehouse * This copies the data from the page into the inode block after 694b1e71b06SSteven Whitehouse * the inode data structure itself. 695b1e71b06SSteven Whitehouse * 69664bc06bbSAndreas Gruenbacher * Returns: copied bytes or errno 697b1e71b06SSteven Whitehouse */ 69864bc06bbSAndreas Gruenbacher int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, 699d6382a35SAndreas Gruenbacher loff_t pos, unsigned copied, 700b1e71b06SSteven Whitehouse struct page *page) 701b1e71b06SSteven Whitehouse { 702b1e71b06SSteven Whitehouse struct gfs2_inode *ip = GFS2_I(inode); 703b1e71b06SSteven Whitehouse u64 to = pos + copied; 704b1e71b06SSteven Whitehouse void *kaddr; 705b1e71b06SSteven Whitehouse unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); 706b1e71b06SSteven Whitehouse 707d6382a35SAndreas Gruenbacher BUG_ON(pos + copied > gfs2_max_stuffed_size(ip)); 708235628c5SAndreas Gruenbacher 709d9349285SCong Wang kaddr = kmap_atomic(page); 710b1e71b06SSteven Whitehouse memcpy(buf + pos, kaddr + pos, copied); 711b1e71b06SSteven Whitehouse flush_dcache_page(page); 712d9349285SCong Wang kunmap_atomic(kaddr); 713b1e71b06SSteven Whitehouse 71443388b21SAl Viro WARN_ON(!PageUptodate(page)); 715b1e71b06SSteven Whitehouse unlock_page(page); 71609cbfeafSKirill A. 
Shutemov put_page(page); 717b1e71b06SSteven Whitehouse 718b1e71b06SSteven Whitehouse if (copied) { 719a2e0f799SSteven Whitehouse if (inode->i_size < to) 720b1e71b06SSteven Whitehouse i_size_write(inode, to); 721b1e71b06SSteven Whitehouse mark_inode_dirty(inode); 722b1e71b06SSteven Whitehouse } 723b1e71b06SSteven Whitehouse return copied; 724b1e71b06SSteven Whitehouse } 725b1e71b06SSteven Whitehouse 726b1e71b06SSteven Whitehouse /** 727b9e03f18SBob Peterson * jdata_set_page_dirty - Page dirtying function 728b1e71b06SSteven Whitehouse * @page: The page to dirty 729b1e71b06SSteven Whitehouse * 730b1e71b06SSteven Whitehouse * Returns: 1 if it dirtyed the page, or 0 otherwise 731b1e71b06SSteven Whitehouse */ 732b1e71b06SSteven Whitehouse 733b9e03f18SBob Peterson static int jdata_set_page_dirty(struct page *page) 734b1e71b06SSteven Whitehouse { 735b1e71b06SSteven Whitehouse SetPageChecked(page); 736b1e71b06SSteven Whitehouse return __set_page_dirty_buffers(page); 737b1e71b06SSteven Whitehouse } 738b1e71b06SSteven Whitehouse 739b1e71b06SSteven Whitehouse /** 740b1e71b06SSteven Whitehouse * gfs2_bmap - Block map function 741b1e71b06SSteven Whitehouse * @mapping: Address space info 742b1e71b06SSteven Whitehouse * @lblock: The block to map 743b1e71b06SSteven Whitehouse * 744b1e71b06SSteven Whitehouse * Returns: The disk address for the block or 0 on hole or error 745b1e71b06SSteven Whitehouse */ 746b1e71b06SSteven Whitehouse 747b1e71b06SSteven Whitehouse static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) 748b1e71b06SSteven Whitehouse { 749b1e71b06SSteven Whitehouse struct gfs2_inode *ip = GFS2_I(mapping->host); 750b1e71b06SSteven Whitehouse struct gfs2_holder i_gh; 751b1e71b06SSteven Whitehouse sector_t dblock = 0; 752b1e71b06SSteven Whitehouse int error; 753b1e71b06SSteven Whitehouse 754b1e71b06SSteven Whitehouse error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 755b1e71b06SSteven Whitehouse if (error) 756b1e71b06SSteven 
		return 0;

	/* Stuffed files have no data blocks to map; report a hole. */
	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

/*
 * gfs2_discard - detach a buffer from the journal and clear its state
 * @sdp: The superblock
 * @bh: The buffer to discard
 *
 * Called with the page locked. Takes the buffer lock and then the log
 * lock (that ordering must be preserved) before unhooking the buffer
 * from any pending log list and wiping its mapped/req/new state.
 */
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		/* Unpinned buffers can simply be dropped from their list;
		 * pinned or listless ones need full journal removal. */
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

/*
 * gfs2_invalidatepage - invalidate part or all of a page
 * @page: The page (must be locked)
 * @offset: Start of the range being invalidated
 * @length: Length of the range
 *
 * Discards every buffer that lies entirely within [offset, offset+length),
 * and releases the page's buffers when the whole page is invalidated.
 */
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	/* partial_page: only a sub-range of the page is being invalidated */
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		/* Stop as soon as a buffer extends past the range. */
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	/* First pass (under log lock + AIL lock): verify every buffer on the
	 * page is releasable; bail out if any is referenced, in an active
	 * transaction, dirty, or pinned. */
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	/* Second pass (log lock still held): detach and free each buffer's
	 * gfs2_bufdata now that we know nothing on the page is in use. */
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}
887b1e71b06SSteven Whitehouse 888b1e71b06SSteven Whitehouse static const struct address_space_operations gfs2_writeback_aops = { 8899d358143SSteven Whitehouse .writepage = gfs2_writepage, 89045138990SSteven Whitehouse .writepages = gfs2_writepages, 891b1e71b06SSteven Whitehouse .readpage = gfs2_readpage, 892b1e71b06SSteven Whitehouse .readpages = gfs2_readpages, 893b1e71b06SSteven Whitehouse .bmap = gfs2_bmap, 894b1e71b06SSteven Whitehouse .invalidatepage = gfs2_invalidatepage, 895b1e71b06SSteven Whitehouse .releasepage = gfs2_releasepage, 896967bcc91SAndreas Gruenbacher .direct_IO = noop_direct_IO, 897b1e71b06SSteven Whitehouse .migratepage = buffer_migrate_page, 898b1e71b06SSteven Whitehouse .is_partially_uptodate = block_is_partially_uptodate, 899aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 900b1e71b06SSteven Whitehouse }; 901b1e71b06SSteven Whitehouse 902b1e71b06SSteven Whitehouse static const struct address_space_operations gfs2_ordered_aops = { 9039d358143SSteven Whitehouse .writepage = gfs2_writepage, 90445138990SSteven Whitehouse .writepages = gfs2_writepages, 905b1e71b06SSteven Whitehouse .readpage = gfs2_readpage, 906b1e71b06SSteven Whitehouse .readpages = gfs2_readpages, 907b9e03f18SBob Peterson .set_page_dirty = __set_page_dirty_buffers, 908b1e71b06SSteven Whitehouse .bmap = gfs2_bmap, 909b1e71b06SSteven Whitehouse .invalidatepage = gfs2_invalidatepage, 910b1e71b06SSteven Whitehouse .releasepage = gfs2_releasepage, 911967bcc91SAndreas Gruenbacher .direct_IO = noop_direct_IO, 912b1e71b06SSteven Whitehouse .migratepage = buffer_migrate_page, 913b1e71b06SSteven Whitehouse .is_partially_uptodate = block_is_partially_uptodate, 914aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 915b1e71b06SSteven Whitehouse }; 916b1e71b06SSteven Whitehouse 917b1e71b06SSteven Whitehouse static const struct address_space_operations gfs2_jdata_aops = { 918b1e71b06SSteven Whitehouse .writepage = gfs2_jdata_writepage, 919b1e71b06SSteven 
Whitehouse .writepages = gfs2_jdata_writepages, 920b1e71b06SSteven Whitehouse .readpage = gfs2_readpage, 921b1e71b06SSteven Whitehouse .readpages = gfs2_readpages, 922b9e03f18SBob Peterson .set_page_dirty = jdata_set_page_dirty, 923b1e71b06SSteven Whitehouse .bmap = gfs2_bmap, 924b1e71b06SSteven Whitehouse .invalidatepage = gfs2_invalidatepage, 925b1e71b06SSteven Whitehouse .releasepage = gfs2_releasepage, 926b1e71b06SSteven Whitehouse .is_partially_uptodate = block_is_partially_uptodate, 927aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 928b1e71b06SSteven Whitehouse }; 929b1e71b06SSteven Whitehouse 930b1e71b06SSteven Whitehouse void gfs2_set_aops(struct inode *inode) 931b1e71b06SSteven Whitehouse { 932b1e71b06SSteven Whitehouse struct gfs2_inode *ip = GFS2_I(inode); 933977767a7SAndreas Gruenbacher struct gfs2_sbd *sdp = GFS2_SB(inode); 934b1e71b06SSteven Whitehouse 935977767a7SAndreas Gruenbacher if (gfs2_is_jdata(ip)) 936b1e71b06SSteven Whitehouse inode->i_mapping->a_ops = &gfs2_jdata_aops; 937977767a7SAndreas Gruenbacher else if (gfs2_is_writeback(sdp)) 938977767a7SAndreas Gruenbacher inode->i_mapping->a_ops = &gfs2_writeback_aops; 939977767a7SAndreas Gruenbacher else if (gfs2_is_ordered(sdp)) 940977767a7SAndreas Gruenbacher inode->i_mapping->a_ops = &gfs2_ordered_aops; 941b1e71b06SSteven Whitehouse else 942b1e71b06SSteven Whitehouse BUG(); 943b1e71b06SSteven Whitehouse } 944