xref: /openbmc/linux/fs/gfs2/meta_io.c (revision b58c6630)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_aspace_writepage - Write back a page of metadata
 * @page: the page to write
 * @wbc: the writeback control
 *
 * Essentially a trimmed-down __block_write_full_page(): write out every
 * dirty, mapped buffer on the page, with the I/O tagged REQ_META | REQ_PRIO
 * to mark it as filesystem metadata.
 */
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from flusher thread and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};
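
/*
 * The two operations tables above are currently identical.  They are kept
 * separate because the a_ops pointer doubles as a type tag:
 * gfs2_mapping2sbd() in meta_io.h compares a mapping's a_ops against
 * gfs2_meta_aops and gfs2_rgrp_aops to work out whether the mapping is
 * embedded in a glock or in the superblock-wide sd_aspace.
 */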

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_get_page_flags(mapping, index,
						FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	put_page(page);

	return bh;
}
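
/*
 * Worked example of the index arithmetic above (illustrative only): with
 * 4 KiB pages (PAGE_SHIFT == 12) and 1 KiB filesystem blocks
 * (sb_bsize_shift == 10), shift == 2, so each page caches four blocks.
 * For blkno == 11: index == 11 >> 2 == 2 and bufnum == 11 - (2 << 2) == 3,
 * i.e. block 11 is the fourth buffer_head on the third page of the mapping.
 * This relies on the block size never exceeding the page size, which GFS2
 * checks when it reads the superblock.
 */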

/**
 * meta_prep_new - Prepare a newly allocated metadata block
 * @bh: the buffer backing the block
 *
 * The block's contents will be constructed in memory rather than read
 * from disk, so mark the buffer clean and uptodate and stamp the on-disk
 * magic number; the caller fills in the rest of the header (see
 * gfs2_metatype_set() in util.h).
 */
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}
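
/*
 * A minimal sketch of how a caller typically uses gfs2_meta_new() when
 * initialising a freshly allocated metadata block, modelled on the pattern
 * in gfs2_indirect_init() in bmap.c (the glock and block number here are
 * assumed to come from the caller's allocation):
 *
 *	struct buffer_head *bh;
 *
 *	bh = gfs2_meta_new(gl, blkno);
 *	gfs2_trans_add_meta(gl, bh);	(journal the new block)
 *	gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 *	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
 *	...
 *	brelse(bh);
 */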

/**
 * gfs2_meta_read_endio - Completion handler for reads from gfs2_submit_bhs()
 * @bio: the completed bio
 *
 * For each page segment in the bio, walk the buffer_heads it covers and
 * invoke their individual b_end_io handlers with the I/O status.
 */
static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)  The buffers must describe consecutive
 * disk blocks: each bio takes its starting sector from the first buffer
 * it covers.
 */
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(GFP_NOIO, num);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		bio_set_dev(bio, bh->b_bdev);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		bio_set_op_attrs(bio, op, op_flags);
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: DIO_WAIT waits for the read to complete
 * @rahead: if set, also submit a read for the following block
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(gfs2_withdrawn(sdp)) &&
	    (!sdp->sd_jdesc || (blkno != sdp->sd_jdesc->jd_no_addr))) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}
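
/*
 * A minimal sketch of a synchronous metadata read (the glock and block
 * number are assumed to be held/valid by the caller):
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_read(gl, blkno, DIO_WAIT, 0, &bh);
 *	if (error)
 *		return error;
 *	... use bh->b_data ...
 *	brelse(bh);
 *
 * Without DIO_WAIT the read is merely started; the caller must then use
 * gfs2_meta_wait() (below) before touching the buffer contents.
 */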

/**
 * gfs2_meta_wait - Wait for a previously submitted block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return 0;
}

/**
 * gfs2_remove_from_journal - Remove a buffer from the journal
 * @bh: the buffer to remove
 * @meta: REMOVE_META if the buffer holds metadata, REMOVE_JDATA for
 *        journaled data
 *
 * Unpin the buffer if it is pinned in the active transaction, queue a
 * revoke if it is still referenced by an in-flight transaction, and
 * clear its dirty/uptodate state so stale contents are never written
 * back or replayed.
 */
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		spin_lock(&sdp->sd_ail_lock);
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		spin_unlock(&sdp->sd_ail_lock);
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - remove an inode's buffers from the journal so they are
 *                  no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, REMOVE_META);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
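
/*
 * Callers note: gfs2_meta_wipe() is used on the block-deallocation path
 * (see __gfs2_free_blocks() in rgrp.c) so that freed metadata blocks are
 * revoked rather than written back, keeping stale metadata out of the log.
 */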

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}
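
/*
 * For the common case of reading an inode's own dinode block, meta_io.h
 * provides the wrapper gfs2_meta_inode_buffer(), roughly:
 *
 *	static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
 *						 struct buffer_head **bhp)
 *	{
 *		return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, bhp);
 *	}
 *
 * With height == 0 the metatype check above expects GFS2_METATYPE_DI.
 */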

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ,
				    REQ_RAHEAD | REQ_META | REQ_PRIO,
				    1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
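
/*
 * A minimal usage sketch (variable names hypothetical): a caller walking an
 * on-disk extent can kick off readahead for the whole run and then wait
 * only for the block it needs right now:
 *
 *	struct buffer_head *bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
 *
 *	if (!buffer_uptodate(bh)) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	... parse bh->b_data ...
 *	brelse(bh);
 *
 * gfs2_meta_ra() has already waited on the first buffer, so only the
 * uptodate check remains; the trailing blocks complete in the background.
 */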
478