xref: /openbmc/linux/fs/gfs2/meta_io.c (revision cc8bbe1a)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

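/**
 * gfs2_aspace_writepage - write back the buffers of a metadata page
 * @page: the (locked) page to write
 * @wbc: the writeback control
 *
 * Marks each dirty, mapped buffer on the page for asynchronous write and
 * submits it with REQ_META | REQ_PRIO.  For non-blocking (WB_SYNC_NONE)
 * writeback, buffers that cannot be locked immediately are skipped and the
 * page is redirtied instead.
 */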
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_op = REQ_META | REQ_PRIO |
		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from the flusher thread
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */
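	/*
	 * Worked example (assuming 4K pages and a 1K filesystem block size):
	 * shift is 12 - 10 = 2, so block 11 lives in page index 11 >> 2 = 2,
	 * at buffer number 11 - (2 << 2) = 3 within that page.
	 */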

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_get_page_flags(mapping, index,
						FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	page_cache_release(page);

	return bh;
}

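/*
 * Prepare a newly allocated metadata buffer: mark it clean and uptodate,
 * and stamp the GFS2 magic number into its on-disk header.
 */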
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

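/*
 * Bio completion handler for metadata reads: for each segment of the bio,
 * walk the buffer heads it covers and hand each one its I/O result via
 * b_end_io.
 */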
static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_error);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
{
	struct buffer_head *bh = bhs[0];
	struct bio *bio;
	int i;

	if (!num)
		return;

	bio = bio_alloc(GFP_NOIO, num);
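	/*
	 * b_blocknr counts filesystem blocks; multiplying by the number of
	 * 512-byte sectors per block (b_size >> 9) yields the starting
	 * sector that the block layer expects.
	 */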
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	for (i = 0; i < num; i++) {
		bh = bhs[i];
		bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
	}
	bio->bi_end_io = gfs2_meta_read_endio;
	submit_bio(rw, bio);
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: If set, also start a read of the following block as read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}
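
/*
 * Typical usage (sketch): read a single metadata block synchronously.
 *
 *	struct buffer_head *bh;
 *	int error = gfs2_meta_read(gl, blkno, DIO_WAIT, 0, &bh);
 *	if (error)
 *		return error;
 *	... examine bh->b_data ...
 *	brelse(bh);
 *
 * This mirrors how gfs2_meta_indirect_buffer() below calls gfs2_meta_read().
 */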

/**
 * gfs2_meta_wait - Wait for a previously started metadata read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}

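/*
 * Remove a buffer from the journal: if it is pinned, unpin it and account
 * for the removal in the current transaction; if it still belongs to a
 * transaction, queue a revoke for it; finally leave the buffer neither
 * dirty nor uptodate.
 */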
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		tr->tr_touched = 1;
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		spin_lock(&sdp->sd_ail_lock);
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		spin_unlock(&sdp->sd_ail_lock);
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make an inode's buffers no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, current->journal_info, 1);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
	int rahead = 0;
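	/*
	 * Height 0 is the inode's own dinode block; any deeper level is an
	 * indirect pointer block, hence the different metadata type that
	 * gfs2_metatype_check() verifies below.
	 */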

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	}
	*bhp = bh;
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;
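	/*
	 * gt_max_readahead is a byte count; shifting right by the block-size
	 * shift converts it to a number of filesystem blocks, which the
	 * readahead extent is then clamped against below.
	 */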

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(READA | REQ_META, 1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}