xref: /openbmc/linux/fs/gfs2/aops.c (revision d6b412c5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

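
/**
 * gfs2_trans_add_databufs - Add a folio's buffers to the current transaction
 * @ip: The inode
 * @folio: The folio
 * @from: Offset of the first byte within the folio
 * @len: Length of the byte range
 *
 * Marks each buffer head that overlaps the range as uptodate and adds it to
 * the running transaction via gfs2_trans_add_data().
 */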
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of __block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling __block_write_full_folio, but it also
 * writes folios outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				 struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc, end_buffer_async_write);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			folio_create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated with the index at which to continue writeback
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct folio_batch *fbatch,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
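
	/*
	 * A single transaction covers the whole batch, so reserve enough
	 * journal space for every block the batched folios may dirty.
	 */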
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;
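
	/*
	 * Write back what we can.  For data integrity syncs, flush the log
	 * afterwards and make a second pass for anything still left dirty.
	 */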
	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_local_page(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_local(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;
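
	/*
	 * Non-jdata files (and bufferless folios when the block size equals
	 * the page size) are read via iomap; stuffed jdata inodes are copied
	 * out of the dinode; everything else goes through mpage.
	 */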
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno on failure
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}
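
/**
 * jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio being dirtied
 *
 * If the folio is dirtied from within a transaction, flag it as "checked"
 * so that writeback knows the data still needs to be journalled.
 */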
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
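
/**
 * gfs2_discard - Detach a buffer from the journal before it is discarded
 * @sdp: The superblock
 * @bh: The buffer head being invalidated
 *
 * Clears the buffer's dirty state, unlinks an unpinned bufdata from its
 * list (or removes the buffer from the journal otherwise), and resets the
 * buffer's mapping state.
 */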
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
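
/**
 * gfs2_invalidate_folio - Invalidate (part of) a jdata folio
 * @folio: The folio
 * @offset: Start of the range being invalidated
 * @length: Length of the range
 *
 * Discards every buffer that lies entirely within the invalidated range.
 * When the whole folio is invalidated, its "checked" flag is cleared and
 * an attempt is made to release its buffers as well.
 */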
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}
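
/*
 * Address space operations for regular (ordered and writeback mode) files,
 * which go through iomap.  Journaled-data files use gfs2_jdata_aops below.
 */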
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
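
/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Journaled-data inodes get the buffer-head based gfs2_jdata_aops;
 * everything else uses the iomap based gfs2_aops.
 */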
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}