// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}
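
/*
 * Editorial example (not part of the original source): the loop above adds
 * only the buffers that overlap the byte window [from, from + len).  With a
 * 4096-byte page, 1024-byte buffers, and a call such as
 *
 *	gfs2_page_add_databufs(ip, page, 1536, 1024);
 *
 * the window is [1536, 2560), so the second and third buffers ([1024, 2048)
 * and [2048, 3072)) are marked uptodate and journaled, while the first and
 * fourth are skipped.
 */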

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}
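
/*
 * Editorial note: gfs2_block_map() is called with create == 0 here, so an
 * unmapped result means the block was never allocated; returning -ENODATA
 * turns a dirty page over a hole into a hard error rather than allocating
 * blocks during writeback.
 */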

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
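
/*
 * Editorial note: the current->journal_info check above catches calls that
 * arrive while a transaction is already open (for example via memory
 * reclaim); such pages are redirtied and left for a later writeback pass,
 * presumably to avoid recursing into the log machinery.
 */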

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}
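
/*
 * Editorial example (not part of the original source): with PAGE_SIZE ==
 * 4096 and i_size == 10000, end_index is 10000 >> 12 == 2 and offset is
 * 10000 & 4095 == 1808, so writing the page at index 2 first zeroes bytes
 * [1808, 4096) to keep data past EOF from reaching disk.
 */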

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated with the index of the last page processed, so that
 *	a later pass can resume after it
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}
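
/*
 * Editorial note (not part of the original source): the transaction is
 * reserved before any page is locked, which is exactly the ordering the
 * comment on gfs2_write_cache_jdata() below is about.  Each page
 * contributes PAGE_SIZE >> inode->i_blkbits blocks to the reservation,
 * e.g. four blocks per page with 4096-byte pages and 1024-byte
 * filesystem blocks.
 */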

/**
 * gfs2_write_cache_jdata - Like write_cache_pages, but with transactions
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
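
/*
 * Editorial note (not part of the original source): for range_cyclic
 * writeback starting at writeback_index N > 0, the first pass covers
 * [N, end of file]; if that pass finishes without being told to stop, the
 * "retry" pass wraps around and covers [0, N - 1], so each dirty page is
 * visited at most once per call.
 */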


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
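
/*
 * Editorial note: for WB_SYNC_ALL, a second gfs2_write_cache_jdata() pass
 * runs after the log flush, presumably to catch pages the flush redirtied
 * so that a data-integrity sync really leaves nothing behind.
 */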

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
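
/*
 * Editorial note: a "stuffed" inode keeps its data inline in the dinode
 * block, immediately after the struct gfs2_dinode header, which is why the
 * read above is a memcpy() out of the metadata buffer rather than a
 * block-mapped I/O.
 */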

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data read, or a negative errno; *pos is advanced
 * by @size on success
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}
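
/*
 * Editorial sketch (hypothetical caller, not part of the original source):
 * reading the first 64 bytes of an internal inode might look like this;
 * note that *pos is only advanced once the whole read has succeeded.
 *
 *	char buf[64];
 *	loff_t pos = 0;
 *	int ret = gfs2_internal_read(ip, buf, &pos, sizeof(buf));
 *	if (ret < 0)
 *		return ret;
 *	// here ret == 64 and pos == 64
 */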

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}
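
/*
 * Editorial note: stuffed inodes deliberately fall through without issuing
 * any I/O; per note 2 above, readahead is best-effort, and gfs2_read_folio()
 * copies the inline data when the page is actually requested.
 */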

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}
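
/*
 * Editorial note: the "checked" flag set here is what __gfs2_jdata_writepage()
 * looks at later; it marks folios dirtied inside a transaction so that
 * writeback knows their buffers still have to be added to a transaction
 * before the write goes out.
 */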

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
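
/*
 * Editorial note: a stuffed inode has no separate data block, so the
 * FIBMAP path above leaves dblock at 0, the conventional "no mapping"
 * answer, rather than exposing the dinode's own block address.
 */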

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
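
/*
 * Editorial note: a bufdata that is queued but not pinned can simply be
 * unlinked from its list; anything pinned (or not queued at all) has made
 * it into the journal machinery and must be taken out through
 * gfs2_remove_from_journal() under sd_ail_lock instead.
 */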
661b1e71b06SSteven Whitehouse 
6625f4b2976SMatthew Wilcox (Oracle) static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
6635f4b2976SMatthew Wilcox (Oracle) 				size_t length)
664b1e71b06SSteven Whitehouse {
6655f4b2976SMatthew Wilcox (Oracle) 	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
6665f4b2976SMatthew Wilcox (Oracle) 	size_t stop = offset + length;
6675f4b2976SMatthew Wilcox (Oracle) 	int partial_page = (offset || length < folio_size(folio));
668b1e71b06SSteven Whitehouse 	struct buffer_head *bh, *head;
669b1e71b06SSteven Whitehouse 	unsigned long pos = 0;
670b1e71b06SSteven Whitehouse 
6715f4b2976SMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
6725c0bb97cSLukas Czerner 	if (!partial_page)
6735f4b2976SMatthew Wilcox (Oracle) 		folio_clear_checked(folio);
6745f4b2976SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
6755f4b2976SMatthew Wilcox (Oracle) 	if (!head)
676b1e71b06SSteven Whitehouse 		goto out;
677b1e71b06SSteven Whitehouse 
6785f4b2976SMatthew Wilcox (Oracle) 	bh = head;
679b1e71b06SSteven Whitehouse 	do {
6805c0bb97cSLukas Czerner 		if (pos + bh->b_size > stop)
6815c0bb97cSLukas Czerner 			return;
6825c0bb97cSLukas Czerner 
683b1e71b06SSteven Whitehouse 		if (offset <= pos)
684b1e71b06SSteven Whitehouse 			gfs2_discard(sdp, bh);
685b1e71b06SSteven Whitehouse 		pos += bh->b_size;
686b1e71b06SSteven Whitehouse 		bh = bh->b_this_page;
687b1e71b06SSteven Whitehouse 	} while (bh != head);
688b1e71b06SSteven Whitehouse out:
6895c0bb97cSLukas Czerner 	if (!partial_page)
6905f4b2976SMatthew Wilcox (Oracle) 		filemap_release_folio(folio, 0);
691b1e71b06SSteven Whitehouse }

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}
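
/*
 * Editorial summary (not part of the original source): regular (ordered or
 * writeback mode) inodes get the iomap-based gfs2_aops, while journaled-data
 * inodes get gfs2_jdata_aops, which routes writes through the transaction
 * paths above.  gfs2_set_aops() is the single switch between the two.
 */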