xref: /openbmc/linux/fs/gfs2/meta_io.c (revision d6b412c5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/slab.h>
9 #include <linux/spinlock.h>
10 #include <linux/completion.h>
11 #include <linux/buffer_head.h>
12 #include <linux/mm.h>
13 #include <linux/pagemap.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/delay.h>
17 #include <linux/bio.h>
18 #include <linux/gfs2_ondisk.h>
19 
20 #include "gfs2.h"
21 #include "incore.h"
22 #include "glock.h"
23 #include "glops.h"
24 #include "inode.h"
25 #include "log.h"
26 #include "lops.h"
27 #include "meta_io.h"
28 #include "rgrp.h"
29 #include "trans.h"
30 #include "util.h"
31 #include "trace_gfs2.h"
32 
33 static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
34 {
35 	struct buffer_head *bh, *head;
36 	int nr_underway = 0;
37 	blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
38 
39 	BUG_ON(!PageLocked(page));
40 	BUG_ON(!page_has_buffers(page));
41 
42 	head = page_buffers(page);
43 	bh = head;
44 
45 	do {
46 		if (!buffer_mapped(bh))
47 			continue;
48 		/*
49 		 * If it's a fully non-blocking write attempt and we cannot
50 		 * lock the buffer then redirty the page.  Note that this can
51 		 * potentially cause a busy-wait loop from the flusher thread and kswapd
52 		 * activity, but those code paths have their own higher-level
53 		 * throttling.
54 		 */
55 		if (wbc->sync_mode != WB_SYNC_NONE) {
56 			lock_buffer(bh);
57 		} else if (!trylock_buffer(bh)) {
58 			redirty_page_for_writepage(wbc, page);
59 			continue;
60 		}
61 		if (test_clear_buffer_dirty(bh)) {
62 			mark_buffer_async_write(bh);
63 		} else {
64 			unlock_buffer(bh);
65 		}
66 	} while ((bh = bh->b_this_page) != head);
67 
68 	/*
69 	 * The page and its buffers are protected by PageWriteback(), so we can
70 	 * drop the bh refcounts early.
71 	 */
72 	BUG_ON(PageWriteback(page));
73 	set_page_writeback(page);
74 
75 	do {
76 		struct buffer_head *next = bh->b_this_page;
77 		if (buffer_async_write(bh)) {
78 			submit_bh(REQ_OP_WRITE | write_flags, bh);
79 			nr_underway++;
80 		}
81 		bh = next;
82 	} while (bh != head);
83 	unlock_page(page);
84 
85 	if (nr_underway == 0)
86 		end_page_writeback(page);
87 
88 	return 0;
89 }
90 
91 const struct address_space_operations gfs2_meta_aops = {
92 	.dirty_folio	= block_dirty_folio,
93 	.invalidate_folio = block_invalidate_folio,
94 	.writepage = gfs2_aspace_writepage,
95 	.release_folio = gfs2_release_folio,
96 };
97 
98 const struct address_space_operations gfs2_rgrp_aops = {
99 	.dirty_folio	= block_dirty_folio,
100 	.invalidate_folio = block_invalidate_folio,
101 	.writepage = gfs2_aspace_writepage,
102 	.release_folio = gfs2_release_folio,
103 };
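/*
 * Illustrative only: both address_space_operations tables above route
 * metadata and resource-group writeback through gfs2_aspace_writepage().
 * The writeback code ends up invoking it with a writeback_control,
 * roughly like:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_NONE,	(or WB_SYNC_ALL for data integrity)
 *	};
 *	mapping->a_ops->writepage(page, &wbc);
 *
 * With WB_SYNC_NONE, a buffer that cannot be trylocked stays dirty and the
 * page is redirtied; with WB_SYNC_ALL, lock_buffer() blocks instead.
 */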
104 
105 /**
106  * gfs2_getbuf - Get a buffer with a given address space
107  * @gl: the glock
108  * @blkno: the block number (filesystem scope)
109  * @create: 1 if the buffer should be created
110  *
111  * Returns: the buffer
112  */
113 
114 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
115 {
116 	struct address_space *mapping = gfs2_glock2aspace(gl);
117 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
118 	struct page *page;
119 	struct buffer_head *bh;
120 	unsigned int shift;
121 	unsigned long index;
122 	unsigned int bufnum;
123 
124 	if (mapping == NULL)
125 		mapping = &sdp->sd_aspace;
126 
127 	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
128 	index = blkno >> shift;             /* convert block to page */
129 	bufnum = blkno - (index << shift);  /* block buf index within page */
130 
131 	if (create) {
132 		for (;;) {
133 			page = grab_cache_page(mapping, index);
134 			if (page)
135 				break;
136 			yield();
137 		}
138 		if (!page_has_buffers(page))
139 			create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
140 	} else {
141 		page = find_get_page_flags(mapping, index,
142 						FGP_LOCK|FGP_ACCESSED);
143 		if (!page)
144 			return NULL;
145 		if (!page_has_buffers(page)) {
146 			bh = NULL;
147 			goto out_unlock;
148 		}
149 	}
150 
151 	/* Locate the buffer head for our block within the page */
152 	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
153 		/* Do nothing */;
154 	get_bh(bh);
155 
156 	if (!buffer_mapped(bh))
157 		map_bh(bh, sdp->sd_vfs, blkno);
158 
159 out_unlock:
160 	unlock_page(page);
161 	put_page(page);
162 
163 	return bh;
164 }
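/*
 * Worked example of the index arithmetic above (assumes 4KiB pages and a
 * 1KiB filesystem block size, i.e. sb_bsize_shift == 10, so shift == 2):
 *
 *	blkno  = 11
 *	index  = 11 >> 2         = 2	(third page in the address space)
 *	bufnum = 11 - (2 << 2)   = 3	(fourth buffer_head on that page)
 *
 * The loop over page_buffers() then advances bufnum times to reach the
 * buffer_head covering block 11.
 */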
165 
166 static void meta_prep_new(struct buffer_head *bh)
167 {
168 	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
169 
170 	lock_buffer(bh);
171 	clear_buffer_dirty(bh);
172 	set_buffer_uptodate(bh);
173 	unlock_buffer(bh);
174 
175 	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
176 }
177 
178 /**
179  * gfs2_meta_new - Get a block
180  * @gl: The glock associated with this block
181  * @blkno: The block number
182  *
183  * Returns: The buffer
184  */
185 
186 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
187 {
188 	struct buffer_head *bh;
189 	bh = gfs2_getbuf(gl, blkno, CREATE);
190 	meta_prep_new(bh);
191 	return bh;
192 }
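/*
 * Minimal usage sketch (illustrative, not taken from this file): a caller
 * allocating a new metadata block typically adds it to the current
 * transaction and stamps the on-disk header type itself, e.g.
 *
 *	bh = gfs2_meta_new(gl, blkno);
 *	gfs2_trans_add_meta(gl, bh);
 *	gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 *
 * gfs2_meta_new() only marks the buffer uptodate and writes the magic
 * number; the type/format fields are left to the caller.
 */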
193 
194 static void gfs2_meta_read_endio(struct bio *bio)
195 {
196 	struct bio_vec *bvec;
197 	struct bvec_iter_all iter_all;
198 
199 	bio_for_each_segment_all(bvec, bio, iter_all) {
200 		struct page *page = bvec->bv_page;
201 		struct buffer_head *bh = page_buffers(page);
202 		unsigned int len = bvec->bv_len;
203 
204 		while (bh_offset(bh) < bvec->bv_offset)
205 			bh = bh->b_this_page;
206 		do {
207 			struct buffer_head *next = bh->b_this_page;
208 			len -= bh->b_size;
209 			bh->b_end_io(bh, !bio->bi_status);
210 			bh = next;
211 		} while (bh && len);
212 	}
213 	bio_put(bio);
214 }
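/*
 * Note on the walk above (illustrative numbers): with 1KiB blocks on 4KiB
 * pages, a single bio_vec can cover up to four buffer_heads.  The first
 * while loop skips buffers that precede bvec->bv_offset, and the do/while
 * then calls b_end_io (end_buffer_read_sync here) once per buffer until
 * bv_len bytes have been accounted for.
 */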
215 
216 /*
217  * Submit several consecutive buffer head I/O requests as a single bio I/O
218  * request.  (See submit_bh_wbc.)
219  */
220 static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
221 {
222 	while (num > 0) {
223 		struct buffer_head *bh = *bhs;
224 		struct bio *bio;
225 
226 		bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
227 		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
228 		while (num > 0) {
229 			bh = *bhs;
230 			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
231 				BUG_ON(bio->bi_iter.bi_size == 0);
232 				break;
233 			}
234 			bhs++;
235 			num--;
236 		}
237 		bio->bi_end_io = gfs2_meta_read_endio;
238 		submit_bio(bio);
239 	}
240 }
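/*
 * Worked example of the sector arithmetic above, assuming 4KiB blocks
 * (b_size == 4096) on a 512-byte-sector device:
 *
 *	bi_sector = b_blocknr * (4096 >> 9) = b_blocknr * 8
 *
 * The bio is allocated with room for 'num' segments, so the two adjacent
 * buffers that gfs2_meta_read() below may pass in normally land in one
 * bio; if bio_add_page() ever refuses a buffer, the outer loop simply
 * starts a new bio for the remaining ones.
 */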
241 
242 /**
243  * gfs2_meta_read - Read a block from disk
244  * @gl: The glock covering the block
245  * @blkno: The block number
246  * @flags: flags
247  * @rahead: Do read-ahead
248  * @bhp: the place where the buffer is returned (NULL on failure)
249  *
250  * Returns: errno
251  */
252 
253 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
254 		   int rahead, struct buffer_head **bhp)
255 {
256 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
257 	struct buffer_head *bh, *bhs[2];
258 	int num = 0;
259 
260 	if (gfs2_withdrawing_or_withdrawn(sdp) &&
261 	    !gfs2_withdraw_in_prog(sdp)) {
262 		*bhp = NULL;
263 		return -EIO;
264 	}
265 
266 	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
267 
268 	lock_buffer(bh);
269 	if (buffer_uptodate(bh)) {
270 		unlock_buffer(bh);
271 		flags &= ~DIO_WAIT;
272 	} else {
273 		bh->b_end_io = end_buffer_read_sync;
274 		get_bh(bh);
275 		bhs[num++] = bh;
276 	}
277 
278 	if (rahead) {
279 		bh = gfs2_getbuf(gl, blkno + 1, CREATE);
280 
281 		lock_buffer(bh);
282 		if (buffer_uptodate(bh)) {
283 			unlock_buffer(bh);
284 			brelse(bh);
285 		} else {
286 			bh->b_end_io = end_buffer_read_sync;
287 			bhs[num++] = bh;
288 		}
289 	}
290 
291 	gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
292 	if (!(flags & DIO_WAIT))
293 		return 0;
294 
295 	bh = *bhp;
296 	wait_on_buffer(bh);
297 	if (unlikely(!buffer_uptodate(bh))) {
298 		struct gfs2_trans *tr = current->journal_info;
299 		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
300 			gfs2_io_error_bh_wd(sdp, bh);
301 		brelse(bh);
302 		*bhp = NULL;
303 		return -EIO;
304 	}
305 
306 	return 0;
307 }
308 
309 /**
310  * gfs2_meta_wait - Wait for a previously submitted block read to complete
311  * @sdp: the filesystem
312  * @bh: The block to wait for
313  *
314  * Returns: errno
315  */
316 
317 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
318 {
319 	if (gfs2_withdrawing_or_withdrawn(sdp) &&
320 	    !gfs2_withdraw_in_prog(sdp))
321 		return -EIO;
322 
323 	wait_on_buffer(bh);
324 
325 	if (!buffer_uptodate(bh)) {
326 		struct gfs2_trans *tr = current->journal_info;
327 		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
328 			gfs2_io_error_bh_wd(sdp, bh);
329 		return -EIO;
330 	}
331 	if (gfs2_withdrawing_or_withdrawn(sdp) &&
332 	    !gfs2_withdraw_in_prog(sdp))
333 		return -EIO;
334 
335 	return 0;
336 }
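/*
 * A plausible split read/wait pattern (sketch only, not lifted from this
 * file): a caller can start the read without DIO_WAIT and pick up the
 * result later with gfs2_meta_wait(), e.g.
 *
 *	error = gfs2_meta_read(gl, blkno, 0, 0, &bh);
 *	if (error)
 *		return error;
 *	... do other work while the read is in flight ...
 *	error = gfs2_meta_wait(sdp, bh);
 *
 * Passing DIO_WAIT to gfs2_meta_read() folds both steps into one call.
 */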
337 
338 void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
339 {
340 	struct address_space *mapping = bh->b_folio->mapping;
341 	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
342 	struct gfs2_bufdata *bd = bh->b_private;
343 	struct gfs2_trans *tr = current->journal_info;
344 	int was_pinned = 0;
345 
346 	if (test_clear_buffer_pinned(bh)) {
347 		trace_gfs2_pin(bd, 0);
348 		atomic_dec(&sdp->sd_log_pinned);
349 		list_del_init(&bd->bd_list);
350 		if (meta == REMOVE_META)
351 			tr->tr_num_buf_rm++;
352 		else
353 			tr->tr_num_databuf_rm++;
354 		set_bit(TR_TOUCHED, &tr->tr_flags);
355 		was_pinned = 1;
356 		brelse(bh);
357 	}
358 	if (bd) {
359 		if (bd->bd_tr) {
360 			gfs2_trans_add_revoke(sdp, bd);
361 		} else if (was_pinned) {
362 			bh->b_private = NULL;
363 			kmem_cache_free(gfs2_bufdata_cachep, bd);
364 		} else if (!list_empty(&bd->bd_ail_st_list) &&
365 					!list_empty(&bd->bd_ail_gl_list)) {
366 			gfs2_remove_from_ail(bd);
367 		}
368 	}
369 	clear_buffer_dirty(bh);
370 	clear_buffer_uptodate(bh);
371 }
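/*
 * Locking note (derived from the callers in this file): this is called
 * with the log lock and sd_ail_lock held; gfs2_journal_wipe() below
 * additionally holds the buffer lock:
 *
 *	lock_buffer(bh);
 *	gfs2_log_lock(sdp);
 *	spin_lock(&sdp->sd_ail_lock);
 *	gfs2_remove_from_journal(bh, ty);
 *	spin_unlock(&sdp->sd_ail_lock);
 *	gfs2_log_unlock(sdp);
 *	unlock_buffer(bh);
 */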
372 
373 /**
374  * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
375  * @sdp: superblock
376  * @bstart: starting block address of buffers to remove
377  * @blen: length of buffers to be removed
378  *
379  * This function is called from gfs2_journal_wipe, whose job is to remove
380  * buffers, corresponding to deleted blocks, from the journal. If we find any
381  * bufdata elements on the system ail1 list, they haven't been written to
382  * the journal yet. So we remove them.
383  */
384 static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
385 {
386 	struct gfs2_trans *tr, *s;
387 	struct gfs2_bufdata *bd, *bs;
388 	struct buffer_head *bh;
389 	u64 end = bstart + blen;
390 
391 	gfs2_log_lock(sdp);
392 	spin_lock(&sdp->sd_ail_lock);
393 	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
394 		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
395 					 bd_ail_st_list) {
396 			bh = bd->bd_bh;
397 			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
398 				continue;
399 
400 			gfs2_remove_from_journal(bh, REMOVE_JDATA);
401 		}
402 	}
403 	spin_unlock(&sdp->sd_ail_lock);
404 	gfs2_log_unlock(sdp);
405 }
406 
407 static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
408 {
409 	struct address_space *mapping = ip->i_inode.i_mapping;
410 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
411 	struct page *page;
412 	struct buffer_head *bh;
413 	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
414 	unsigned long index = blkno >> shift; /* convert block to page */
415 	unsigned int bufnum = blkno - (index << shift);
416 
417 	page = find_get_page_flags(mapping, index, FGP_LOCK|FGP_ACCESSED);
418 	if (!page)
419 		return NULL;
420 	if (!page_has_buffers(page)) {
421 		unlock_page(page);
422 		put_page(page);
423 		return NULL;
424 	}
425 	/* Locate the buffer head for our block within the page */
426 	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
427 		/* Do nothing */;
428 	get_bh(bh);
429 	unlock_page(page);
430 	put_page(page);
431 	return bh;
432 }
433 
434 /**
435  * gfs2_journal_wipe - make sure an inode's buffers are no longer dirty or pinned
436  * @ip: the inode who owns the buffers
437  * @bstart: the first buffer in the run
438  * @blen: the number of buffers in the run
439  *
440  */
441 
442 void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
443 {
444 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
445 	struct buffer_head *bh;
446 	int ty;
447 
448 	if (!ip->i_gl) {
449 		/* This can only happen during incomplete inode creation. */
450 		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
451 		return;
452 	}
453 
454 	gfs2_ail1_wipe(sdp, bstart, blen);
455 	while (blen) {
456 		ty = REMOVE_META;
457 		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
458 		if (!bh && gfs2_is_jdata(ip)) {
459 			bh = gfs2_getjdatabuf(ip, bstart);
460 			ty = REMOVE_JDATA;
461 		}
462 		if (bh) {
463 			lock_buffer(bh);
464 			gfs2_log_lock(sdp);
465 			spin_lock(&sdp->sd_ail_lock);
466 			gfs2_remove_from_journal(bh, ty);
467 			spin_unlock(&sdp->sd_ail_lock);
468 			gfs2_log_unlock(sdp);
469 			unlock_buffer(bh);
470 			brelse(bh);
471 		}
472 
473 		bstart++;
474 		blen--;
475 	}
476 }
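/*
 * Usage sketch (hedged; the actual call sites live outside this file):
 * the block deallocation path is the typical caller, wiping journal state
 * for a freshly freed extent before the blocks can be reused, roughly:
 *
 *	... mark the extent free in the resource group ...
 *	gfs2_journal_wipe(ip, bstart, blen);
 */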
477 
478 /**
479  * gfs2_meta_buffer - Get a metadata buffer
480  * @ip: The GFS2 inode
481  * @mtype: The block type (GFS2_METATYPE_*)
482  * @num: The block number (device relative) of the buffer
483  * @bhp: the buffer is returned here
484  *
485  * Returns: errno
486  */
487 
488 int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
489 		     struct buffer_head **bhp)
490 {
491 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
492 	struct gfs2_glock *gl = ip->i_gl;
493 	struct buffer_head *bh;
494 	int ret = 0;
495 	int rahead = 0;
496 
497 	if (num == ip->i_no_addr)
498 		rahead = ip->i_rahead;
499 
500 	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
501 	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
502 		brelse(bh);
503 		ret = -EIO;
504 	} else {
505 		*bhp = bh;
506 	}
507 	return ret;
508 }
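/*
 * Example: reading an inode's own dinode block, mirroring the
 * gfs2_meta_inode_buffer() helper in meta_io.h (sketch only):
 *
 *	error = gfs2_meta_buffer(ip, GFS2_METATYPE_DI, ip->i_no_addr, &bh);
 *	if (error)
 *		return error;
 *	... use the dinode in bh->b_data ...
 *	brelse(bh);
 *
 * Reading the dinode block also triggers i_rahead worth of readahead, per
 * the ip->i_no_addr check above.
 */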
509 
510 /**
511  * gfs2_meta_ra - start readahead on an extent of a file
512  * @gl: the glock the blocks belong to
513  * @dblock: the starting disk block
514  * @extlen: the number of blocks in the extent
515  *
516  * returns: the first buffer in the extent
517  */
518 
519 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
520 {
521 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
522 	struct buffer_head *first_bh, *bh;
523 	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
524 			  sdp->sd_sb.sb_bsize_shift;
525 
526 	BUG_ON(!extlen);
527 
528 	if (max_ra < 1)
529 		max_ra = 1;
530 	if (extlen > max_ra)
531 		extlen = max_ra;
532 
533 	first_bh = gfs2_getbuf(gl, dblock, CREATE);
534 
535 	if (buffer_uptodate(first_bh))
536 		goto out;
537 	bh_read_nowait(first_bh, REQ_META | REQ_PRIO);
538 
539 	dblock++;
540 	extlen--;
541 
542 	while (extlen) {
543 		bh = gfs2_getbuf(gl, dblock, CREATE);
544 
545 		bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
546 		brelse(bh);
547 		dblock++;
548 		extlen--;
549 		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
550 			goto out;
551 	}
552 
553 	wait_on_buffer(first_bh);
554 out:
555 	return first_bh;
556 }
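/*
 * Worked example of the clamp above (assuming the gt_max_readahead
 * tunable is left at what I believe is its 256KiB default): with 4KiB
 * blocks, max_ra = (1 << 18) >> 12 = 64, so an extent longer than 64
 * blocks is read ahead at most 64 blocks at a time.  The loop also stops
 * issuing further readahead as soon as the first buffer completes, and
 * the caller only ever waits on first_bh, never on the readahead blocks.
 */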
557 
558