// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     unsigned int from, unsigned int len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

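	/*
	 * The buffers on a folio form a circular list; "bh != head || !start"
	 * lets the loop visit head on the first pass and stop once it comes
	 * back around.  Only buffers overlapping [from, from + len) are
	 * added to the current transaction.
	 */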
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

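	/*
	 * PageChecked means the page was dirtied under a transaction (see
	 * jdata_dirty_folio) and its buffers still need to be added to the
	 * running transaction before the page can be written back.
	 */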
	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, page_folio(page), 0, PAGE_SIZE);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

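	/*
	 * A page that still needs a transaction (PageChecked), or a write
	 * arriving while we are already inside a transaction
	 * (current->journal_info), cannot be handled safely here; redirty
	 * the page and leave it for ->writepages, which starts the
	 * required transaction first.
	 */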
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated to the index of the last folio processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct folio_batch *fbatch,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

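	/*
	 * Size the transaction to cover every block in the batch up front;
	 * large folios may span several pages, and each page covers
	 * PAGE_SIZE >> i_blkbits file system blocks.
	 */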
	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(&folio->page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio->index +
					folio_nr_pages(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages, but with transactions
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
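	/*
	 * For integrity sync (and tagged writepages), work from the
	 * TOWRITE tag: the pages to write are tagged up front so that
	 * pages dirtied while the loop runs cannot keep it going forever.
	 */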
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

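	/*
	 * A first pass may leave out pages whose buffers are still held in
	 * the journal.  For WB_SYNC_ALL, flush the log and make a second
	 * pass so that those pages reach disk as well.
	 */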
	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

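	/*
	 * A stuffed file keeps its data in the dinode block, immediately
	 * after the on-disk inode header; copy it out and zero the rest
	 * of the page.
	 */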
	kaddr = kmap_atomic(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;
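
	/*
	 * Copy loop: one page per iteration.  The boundary clamp tests
	 * offset + size rather than offset + amt, so this helper is only
	 * correct for reads of at most PAGE_SIZE; in-tree callers read
	 * small, sub-page on-disk structures.  A sketch of a typical
	 * (hypothetical) call:
	 *
	 *	char buf[sizeof(struct gfs2_rindex)];
	 *	loff_t pos = 0;
	 *	int ret = gfs2_internal_read(ip, buf, &pos, sizeof(buf));
	 *	if (ret != sizeof(buf))
	 *		return ret;
	 */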
	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via read_folio later.
 * 2. We don't handle stuffed files here; we let read_folio do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

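	/*
	 * Stuffed inodes keep their data in the dinode, so there is
	 * nothing useful to read ahead (note 2 above).  jdata inodes go
	 * through the buffer-head based mpage path; everything else uses
	 * iomap.
	 */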
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

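	/*
	 * Stuffed files keep their data inside the dinode and have no
	 * separately addressable data block, so report a hole (0) for
	 * them.
	 */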
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

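	/*
	 * Detach this buffer from the journal: drop it from its log list
	 * if it is not pinned; otherwise pull it out of the journal and
	 * the AIL via gfs2_remove_from_journal().
	 */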
	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

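	/*
	 * Walk the folio's circular list of buffers and discard each
	 * buffer that lies entirely within [offset, offset + length).
	 */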
	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

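	/*
	 * Every buffer is releasable: detach and free each buffer's
	 * bufdata (unless it is still queued as a revoke) before handing
	 * the folio to try_to_free_buffers().
	 */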
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

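/*
 * Regular files use the iomap-based address space operations; jdata
 * files need the buffer-head based, journal-aware variants defined in
 * gfs2_jdata_aops below.
 */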
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}