// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     unsigned int from, unsigned int len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}
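
/*
 * Worked example (illustrative only): with a 4096-byte folio carved
 * into four 1024-byte buffer heads, a call such as
 *
 *	gfs2_trans_add_databufs(ip, folio, 1024, 2048);
 *
 * covers byte range [1024, 3072), so only the second and third buffer
 * heads are marked uptodate and added to the transaction; the first is
 * skipped by the "end <= from" test and the fourth by "start >= to".
 */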

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file (ignored here:
 *          this variant never allocates)
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}
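
/*
 * Note: this helper has the get_block_t signature and is handed to
 * __block_write_full_page() by gfs2_write_jdata_page() below; @create
 * is deliberately ignored and gfs2_block_map() is called with
 * create == 0, so writeback over an unmapped block fails with -ENODATA
 * instead of allocating.
 */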

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}
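
/*
 * Worked example (illustrative only): with PAGE_SIZE == 4096 and
 * i_size == 10000, end_index is 2 and offset is 1808, so writing page
 * index 2 first zeroes bytes [1808, 4096); pages 0 and 1 are written
 * out whole.
 */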

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			folio_create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_page(&folio->page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated with the page index to restart writeback from
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct folio_batch *fbatch,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio->index +
					folio_nr_pages(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
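
/*
 * Worked example (illustrative only): with 4096-byte pages and a
 * 1024-byte block size (i_blkbits == 10), a batch of three single-page
 * folios reserves nrblocks = 3 * (4096 >> 10) = 12 blocks for the
 * transaction, enough to cover every buffer in the batch.
 */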

/**
 * gfs2_write_cache_jdata - Like write_cache_pages(), but starting transactions
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
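
/*
 * Example (illustrative only): if mapping->writeback_index is 100 when
 * a range_cyclic writeback starts, the first pass covers page indexes
 * [100, ULONG_MAX]; if that pass completes without being told to stop,
 * the "!cycled && !done" retry covers [0, 99], so the whole file is
 * visited exactly once per cycle.
 */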

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
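
/*
 * Layout note (illustrative): a "stuffed" file keeps its data in the
 * dinode block itself, immediately after the struct gfs2_dinode
 * header, so at most bsize - sizeof(struct gfs2_dinode) bytes fit and
 * everything lives in page 0; any later page is served as zeroes by
 * the early branch above.
 */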

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		/* clamp to the end of this page, not of the whole read */
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}
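
/*
 * Usage sketch (hypothetical, for illustration): internal files such
 * as the rindex are read with a cursor that the function advances on
 * success, along the lines of:
 *
 *	char buf[sizeof(struct gfs2_rindex)];
 *	loff_t pos = 0;
 *
 *	while (gfs2_internal_read(ip, buf, &pos, sizeof(buf)) ==
 *	       sizeof(buf))
 *		;
 *
 * Each successful pass advances pos by sizeof(buf); the real rindex
 * reader lives in rgrp.c, and error returns are negative errnos.
 */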

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		return;
	if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
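
/*
 * Example (illustrative only): ->bmap is what services the FIBMAP
 * ioctl, so a process with CAP_SYS_RAWIO can ask where a block lives:
 *
 *	int block = 0;
 *
 *	ioctl(fd, FIBMAP, &block);
 *
 * On return, block holds the physical block number, or 0 for a hole.
 * For a stuffed inode this always returns 0: the data sits inside the
 * dinode and has no block address of its own.
 */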

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}
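
/*
 * Worked example (illustrative only): invalidating offset 1024,
 * length 2048 of a 4096-byte folio with 1024-byte buffers is a
 * partial_page call: stop is 3072, the buffer at pos 0 is skipped
 * (offset > pos), the buffers at pos 1024 and 2048 are discarded, and
 * the loop returns before the buffer at pos 3072 because pos + b_size
 * would pass stop.
 */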

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}
781