xref: /openbmc/linux/fs/gfs2/lops.c (revision eb431351)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/slab.h>
9 #include <linux/spinlock.h>
10 #include <linux/completion.h>
11 #include <linux/buffer_head.h>
12 #include <linux/mempool.h>
13 #include <linux/gfs2_ondisk.h>
14 #include <linux/bio.h>
15 #include <linux/fs.h>
16 #include <linux/list_sort.h>
17 #include <linux/blkdev.h>
18 
19 #include "bmap.h"
20 #include "dir.h"
21 #include "gfs2.h"
22 #include "incore.h"
23 #include "inode.h"
24 #include "glock.h"
25 #include "log.h"
26 #include "lops.h"
27 #include "meta_io.h"
28 #include "recovery.h"
29 #include "rgrp.h"
30 #include "trans.h"
31 #include "util.h"
32 #include "trace_gfs2.h"
33 
34 /**
35  * gfs2_pin - Pin a buffer in memory
36  * @sdp: The superblock
37  * @bh: The buffer to be pinned
38  *
39  * The log lock must be held when calling this function
40  */
41 void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
42 {
43 	struct gfs2_bufdata *bd;
44 
45 	BUG_ON(!current->journal_info);
46 
47 	clear_buffer_dirty(bh);
48 	if (test_set_buffer_pinned(bh))
49 		gfs2_assert_withdraw(sdp, 0);
50 	if (!buffer_uptodate(bh))
51 		gfs2_io_error_bh_wd(sdp, bh);
52 	bd = bh->b_private;
53 	/* If this buffer is in the AIL and it has already been written
54 	 * back to the in-place disk block, move it to the ail2 list.
55 	 */
56 	spin_lock(&sdp->sd_ail_lock);
57 	if (bd->bd_tr)
58 		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
59 	spin_unlock(&sdp->sd_ail_lock);
60 	get_bh(bh);
61 	atomic_inc(&sdp->sd_log_pinned);
62 	trace_gfs2_pin(bd, 1);
63 }
64 
65 static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
66 {
67 	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
68 }
69 
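/**
 * maybe_release_space - Sync a pinned rgrp bitmap back into its clone
 * @bd: The buffer being unpinned, which belongs to a resource group glock
 *
 * Resource group bitmaps keep a clone holding the pre-transaction state so
 * that blocks freed in the transaction are not reused before the bitmap has
 * reached the log. Once the buffer is unpinned, copy the real bitmap over
 * the clone (issuing discards for the freed blocks if mounted with
 * "discard") and make the freed space allocatable again.
 */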
70 static void maybe_release_space(struct gfs2_bufdata *bd)
71 {
72 	struct gfs2_glock *gl = bd->bd_gl;
73 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
74 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
75 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
76 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
77 
78 	if (bi->bi_clone == NULL)
79 		return;
80 	if (sdp->sd_args.ar_discard)
81 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
82 	memcpy(bi->bi_clone + bi->bi_offset,
83 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
84 	clear_bit(GBF_FULL, &bi->bi_flags);
85 	rgd->rd_free_clone = rgd->rd_free;
86 	rgd->rd_extfail_pt = rgd->rd_free;
87 }
88 
89 /**
90  * gfs2_unpin - Unpin a buffer
91  * @sdp: the filesystem the buffer belongs to
92  * @bh: The buffer to unpin
93  * @tr: The transaction whose ail1 list the buffer is added to
94  *
96  */
97 
98 static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
99 		       struct gfs2_trans *tr)
100 {
101 	struct gfs2_bufdata *bd = bh->b_private;
102 
103 	BUG_ON(!buffer_uptodate(bh));
104 	BUG_ON(!buffer_pinned(bh));
105 
106 	lock_buffer(bh);
107 	mark_buffer_dirty(bh);
108 	clear_buffer_pinned(bh);
109 
110 	if (buffer_is_rgrp(bd))
111 		maybe_release_space(bd);
112 
113 	spin_lock(&sdp->sd_ail_lock);
114 	if (bd->bd_tr) {
115 		list_del(&bd->bd_ail_st_list);
116 		brelse(bh);
117 	} else {
118 		struct gfs2_glock *gl = bd->bd_gl;
119 		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
120 		atomic_inc(&gl->gl_ail_count);
121 	}
122 	bd->bd_tr = tr;
123 	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
124 	spin_unlock(&sdp->sd_ail_lock);
125 
126 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
127 	trace_gfs2_pin(bd, 0);
128 	unlock_buffer(bh);
129 	atomic_dec(&sdp->sd_log_pinned);
130 }
131 
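/**
 * gfs2_log_incr_head - Advance the log flush head by one block
 * @sdp: The superblock
 *
 * Wraps around to block 0 when the end of the journal is reached. The
 * BUG_ON guards against the flush head catching up with the log tail.
 */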
132 void gfs2_log_incr_head(struct gfs2_sbd *sdp)
133 {
134 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
135 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
136 
137 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
138 		sdp->sd_log_flush_head = 0;
139 }
140 
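/**
 * gfs2_log_bmap - Map a journal-relative block to its device block
 * @jd: The journal descriptor
 * @lblock: The logical block number, relative to the start of the journal
 *
 * Walks the journal's extent list. Returns the device (physical) block
 * number, or -1 if @lblock is not covered by any mapped extent.
 */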
141 u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
142 {
143 	struct gfs2_journal_extent *je;
144 
145 	list_for_each_entry(je, &jd->extent_list, list) {
146 		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
147 			return je->dblock + lblock - je->lblock;
148 	}
149 
150 	return -1;
151 }
152 
153 /**
154  * gfs2_end_log_write_bh - end log write of pagecache data with buffers
155  * @sdp: The superblock
156  * @bvec: The bio_vec
157  * @error: The i/o status
158  *
159  * This finds the relevant buffers and unlocks them and sets the
160  * error flag according to the status of the i/o request. This is
161  * used when the log is writing data which has an in-place version
162  * that is pinned in the pagecache.
163  */
164 
165 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
166 				  struct bio_vec *bvec,
167 				  blk_status_t error)
168 {
169 	struct buffer_head *bh, *next;
170 	struct page *page = bvec->bv_page;
171 	unsigned size;
172 
173 	bh = page_buffers(page);
174 	size = bvec->bv_len;
175 	while (bh_offset(bh) < bvec->bv_offset)
176 		bh = bh->b_this_page;
177 	do {
178 		if (error)
179 			mark_buffer_write_io_error(bh);
180 		unlock_buffer(bh);
181 		next = bh->b_this_page;
182 		size -= bh->b_size;
183 		brelse(bh);
184 		bh = next;
185 	} while(bh && size);
186 }
187 
188 /**
189  * gfs2_end_log_write - end of i/o to the log
190  * @bio: The bio
191  *
192  * Each bio_vec contains either data from the pagecache or data
193  * relating to the log itself. Here we iterate over the bio_vec
194  * array, processing both kinds of data.
195  *
196  */
197 
198 static void gfs2_end_log_write(struct bio *bio)
199 {
200 	struct gfs2_sbd *sdp = bio->bi_private;
201 	struct bio_vec *bvec;
202 	struct page *page;
203 	struct bvec_iter_all iter_all;
204 
205 	if (bio->bi_status) {
206 		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
207 		       bio->bi_status, sdp->sd_jdesc->jd_jid);
208 		wake_up(&sdp->sd_logd_waitq);
209 	}
210 
211 	bio_for_each_segment_all(bvec, bio, iter_all) {
212 		page = bvec->bv_page;
213 		if (page_has_buffers(page))
214 			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
215 		else
216 			mempool_free(page, gfs2_page_pool);
217 	}
218 
219 	bio_put(bio);
220 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
221 		wake_up(&sdp->sd_log_flush_wait);
222 }
223 
224 /**
225  * gfs2_log_submit_bio - Submit any pending log bio
226  * @biop: Address of the bio pointer
227  * @opf: REQ_OP | op_flags
228  *
229  * Submit any pending part-built or full bio to the block device. If
230  * there is no pending bio, then this is a no-op.
231  */
232 
233 void gfs2_log_submit_bio(struct bio **biop, int opf)
234 {
235 	struct bio *bio = *biop;
236 	if (bio) {
237 		struct gfs2_sbd *sdp = bio->bi_private;
238 		atomic_inc(&sdp->sd_log_in_flight);
239 		bio->bi_opf = opf;
240 		submit_bio(bio);
241 		*biop = NULL;
242 	}
243 }
244 
245 /**
246  * gfs2_log_alloc_bio - Allocate a bio
247  * @sdp: The super block
248  * @blkno: The device block number we want to write to
249  * @end_io: The bi_end_io callback
250  *
251  * Allocate a new bio, initialize it with the given parameters and return it.
252  *
253  * Returns: The newly allocated bio
254  */
255 
256 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
257 				      bio_end_io_t *end_io)
258 {
259 	struct super_block *sb = sdp->sd_vfs;
260 	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
261 
262 	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
263 	bio_set_dev(bio, sb->s_bdev);
264 	bio->bi_end_io = end_io;
265 	bio->bi_private = sdp;
266 
267 	return bio;
268 }
269 
270 /**
271  * gfs2_log_get_bio - Get cached log bio, or allocate a new one
272  * @sdp: The super block
273  * @blkno: The device block number we want to write to
274  * @biop: Address of the cached bio pointer
275  * @op: REQ_OP
276  * @end_io: The bi_end_io callback
277  * @flush: Always flush the current bio and allocate a new one?
278  *
279  * If there is a cached bio, then if the next block number is sequential
280  * with the previous one, return it, otherwise flush the bio to the
281  * device. If there is no cached bio, or we just flushed it, then
282  * allocate a new one.
283  *
284  * Returns: The bio to use for log writes
285  */
286 
287 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
288 				    struct bio **biop, int op,
289 				    bio_end_io_t *end_io, bool flush)
290 {
291 	struct bio *bio = *biop;
292 
293 	if (bio) {
294 		u64 nblk;
295 
296 		nblk = bio_end_sector(bio);
297 		nblk >>= sdp->sd_fsb2bb_shift;
298 		if (blkno == nblk && !flush)
299 			return bio;
300 		gfs2_log_submit_bio(biop, op);
301 	}
302 
303 	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
304 	return *biop;
305 }
306 
307 /**
308  * gfs2_log_write - write to log
309  * @sdp: the filesystem
310  * @page: the page to write
311  * @size: the size of the data to write
312  * @offset: the offset within the page
313  * @blkno: block number of the log entry
314  *
315  * Try and add the page segment to the current bio. If that fails,
316  * submit the current bio to the device and create a new one, and
317  * then add the page segment to that.
318  */
319 
320 void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
321 		    unsigned size, unsigned offset, u64 blkno)
322 {
323 	struct bio *bio;
324 	int ret;
325 
326 	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
327 			       gfs2_end_log_write, false);
328 	ret = bio_add_page(bio, page, size, offset);
329 	if (ret == 0) {
330 		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
331 				       REQ_OP_WRITE, gfs2_end_log_write, true);
332 		ret = bio_add_page(bio, page, size, offset);
333 		WARN_ON(ret == 0);
334 	}
335 }
336 
337 /**
338  * gfs2_log_write_bh - write a buffer's content to the log
339  * @sdp: The super block
340  * @bh: The buffer pointing to the in-place location
341  *
342  * This writes the content of the buffer to the next available location
343  * in the log. The buffer will be unlocked once the i/o to the log has
344  * completed.
345  */
346 
347 static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
348 {
349 	u64 dblock;
350 
351 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
352 	gfs2_log_incr_head(sdp);
353 	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
354 }
355 
356 /**
357  * gfs2_log_write_page - write one block stored in a page, into the log
358  * @sdp: The superblock
359  * @page: The struct page
360  *
361  * This writes the first block-sized part of the page into the log. Note
362  * that the page must have been allocated from the gfs2_page_pool mempool
363  * and that after this has been called, ownership has been transferred and
364  * the page may be freed at any time.
365  */
366 
367 void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
368 {
369 	struct super_block *sb = sdp->sd_vfs;
370 	u64 dblock;
371 
372 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
373 	gfs2_log_incr_head(sdp);
374 	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
375 }
376 
377 /**
378  * gfs2_end_log_read - end I/O callback for reads from the log
379  * @bio: The bio
380  *
381  * Simply unlock the pages in the bio. The main thread will wait on them and
382  * process them in order as necessary.
383  */
384 
385 static void gfs2_end_log_read(struct bio *bio)
386 {
387 	struct page *page;
388 	struct bio_vec *bvec;
389 	struct bvec_iter_all iter_all;
390 
391 	bio_for_each_segment_all(bvec, bio, iter_all) {
392 		page = bvec->bv_page;
393 		if (bio->bi_status) {
394 			int err = blk_status_to_errno(bio->bi_status);
395 
396 			SetPageError(page);
397 			mapping_set_error(page->mapping, err);
398 		}
399 		unlock_page(page);
400 	}
401 
402 	bio_put(bio);
403 }
404 
405 /**
406  * gfs2_jhead_pg_srch - Look for the journal head in a given page.
407  * @jd: The journal descriptor
408  * @head: Journal head to update if a newer log header is found
409  * @page: The page to look in
410  * Returns: true if the journal head has been found, false otherwise.
411  */
412 
413 static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
414 			      struct gfs2_log_header_host *head,
415 			      struct page *page)
416 {
417 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
418 	struct gfs2_log_header_host uninitialized_var(lh);
419 	void *kaddr = kmap_atomic(page);
420 	unsigned int offset;
421 	bool ret = false;
422 
423 	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
424 		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
425 			if (lh.lh_sequence > head->lh_sequence)
426 				*head = lh;
427 			else {
428 				ret = true;
429 				break;
430 			}
431 		}
432 	}
433 	kunmap_atomic(kaddr);
434 	return ret;
435 }
436 
437 /**
438  * gfs2_jhead_process_page - Search/cleanup a page
439  * @jd: The journal descriptor
440  * @index: Index of the page to look into
441  * @head: The journal head to fill in, if found
442  * @done: If already set, only perform cleanup; otherwise search and set if found
443  *
444  * Find the page with 'index' in the journal's mapping. Search the page for
445  * the journal head if requested (*done == false). Release the two references
446  * held on the page so the page cache can reclaim it: the first was taken by
447  * find_or_create_page() when the page was obtained and added to a bio, and
448  * the second by find_get_page() here, so that we could wait on the page
449  * while I/O on it completes.
450  * This function is also used to free up a page we might have grabbed but not
451  * used: perhaps we added it to a bio but never submitted it for I/O, or we
452  * submitted the I/O but had already found the jhead, so we only need to drop
453  * our references to the page.
454  */
455 
456 static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
457 				    struct gfs2_log_header_host *head,
458 				    bool *done)
459 {
460 	struct page *page;
461 
462 	page = find_get_page(jd->jd_inode->i_mapping, index);
463 	wait_on_page_locked(page);
464 
465 	if (PageError(page))
466 		*done = true;
467 
468 	if (!*done)
469 		*done = gfs2_jhead_pg_srch(jd, head, page);
470 
471 	put_page(page); /* Once for find_get_page */
472 	put_page(page); /* Once more for find_or_create_page */
473 }
474 
475 /**
476  * gfs2_find_jhead - find the head of a log
477  * @jd: The journal descriptor
478  * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If true, leave the journal's pages in the page cache after the scan
479  *
480  * Do a search of a journal by reading it in large chunks using bios and find
481  * the valid log entry with the highest sequence number (i.e., the log head).
482  *
483  * Returns: 0 on success, errno otherwise
484  */
485 int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
486 		    bool keep_cache)
487 {
488 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
489 	struct address_space *mapping = jd->jd_inode->i_mapping;
490 	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
491 	unsigned int bsize = sdp->sd_sb.sb_bsize;
492 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
493 	unsigned int shift = PAGE_SHIFT - bsize_shift;
494 	unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
495 	struct gfs2_journal_extent *je;
496 	int sz, ret = 0;
497 	struct bio *bio = NULL;
498 	struct page *page = NULL;
499 	bool done = false;
500 	errseq_t since;
501 
502 	memset(head, 0, sizeof(*head));
503 	if (list_empty(&jd->extent_list))
504 		gfs2_map_journal_extents(sdp, jd);
505 
506 	since = filemap_sample_wb_err(mapping);
507 	list_for_each_entry(je, &jd->extent_list, list) {
508 		for (; block < je->lblock + je->blocks; block++) {
509 			u64 dblock;
510 
511 			if (!page) {
512 				page = find_or_create_page(mapping,
513 						block >> shift, GFP_NOFS);
514 				if (!page) {
515 					ret = -ENOMEM;
516 					done = true;
517 					goto out;
518 				}
519 			}
520 
521 			if (bio) {
522 				unsigned int off;
523 
524 				off = (block << bsize_shift) & ~PAGE_MASK;
525 				sz = bio_add_page(bio, page, bsize, off);
526 				if (sz == bsize) { /* block added */
527 					if (off + bsize == PAGE_SIZE) {
528 						page = NULL;
529 						goto page_added;
530 					}
531 					continue;
532 				}
533 				blocks_submitted = block + 1;
534 				submit_bio(bio);
535 				bio = NULL;
536 			}
537 
538 			dblock = je->dblock + (block - je->lblock);
539 			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
540 			bio->bi_opf = REQ_OP_READ;
541 			sz = bio_add_page(bio, page, bsize, 0);
542 			gfs2_assert_warn(sdp, sz == bsize);
543 			if (bsize == PAGE_SIZE)
544 				page = NULL;
545 
546 page_added:
547 			if (blocks_submitted < blocks_read + readahead_blocks) {
548 				/* Keep at least one bio in flight */
549 				continue;
550 			}
551 
552 			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
553 			blocks_read += PAGE_SIZE >> bsize_shift;
554 			if (done)
555 				goto out;  /* found */
556 		}
557 	}
558 
559 out:
560 	if (bio)
561 		submit_bio(bio);
562 	while (blocks_read < block) {
563 		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
564 		blocks_read += PAGE_SIZE >> bsize_shift;
565 	}
566 
567 	if (!ret)
568 		ret = filemap_check_wb_err(mapping, since);
569 
570 	if (!keep_cache)
571 		truncate_inode_pages(mapping, 0);
572 
573 	return ret;
574 }
575 
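/**
 * gfs2_get_log_desc - Allocate and initialise a log descriptor block
 * @sdp: The superblock
 * @ld_type: The GFS2_LOG_DESC_* type of this descriptor
 * @ld_length: Number of log blocks in the chunk, including this descriptor
 * @ld_data1: Type-specific count (e.g. number of blocks or revokes)
 *
 * The page is taken from the gfs2_page_pool mempool; ownership passes to
 * gfs2_log_write_page() when the descriptor is written to the log.
 */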
576 static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
577 				      u32 ld_length, u32 ld_data1)
578 {
579 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
580 	struct gfs2_log_descriptor *ld = page_address(page);
581 	clear_page(ld);
582 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
583 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
584 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
585 	ld->ld_type = cpu_to_be32(ld_type);
586 	ld->ld_length = cpu_to_be32(ld_length);
587 	ld->ld_data1 = cpu_to_be32(ld_data1);
588 	ld->ld_data2 = 0;
589 	return page;
590 }
591 
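/*
 * Journaled data blocks that happen to start with the GFS2 magic number
 * could be mistaken for metadata on replay, so such buffers are marked as
 * "escaped": the copy written to the log has its first word zeroed, and
 * databuf_lo_scan_elements() restores the magic number during recovery.
 */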
592 static void gfs2_check_magic(struct buffer_head *bh)
593 {
594 	void *kaddr;
595 	__be32 *ptr;
596 
597 	clear_buffer_escaped(bh);
598 	kaddr = kmap_atomic(bh->b_page);
599 	ptr = kaddr + bh_offset(bh);
600 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
601 		set_buffer_escaped(bh);
602 	kunmap_atomic(kaddr);
603 }
604 
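/* list_sort() comparator: order buffers by their in-place block number */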
605 static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
606 {
607 	struct gfs2_bufdata *bda, *bdb;
608 
609 	bda = list_entry(a, struct gfs2_bufdata, bd_list);
610 	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
611 
612 	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
613 		return -1;
614 	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
615 		return 1;
616 	return 0;
617 }
618 
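/**
 * gfs2_before_commit - Write a list of pinned buffers into the log
 * @sdp: The superblock
 * @limit: Maximum number of blocks described by a single log descriptor
 * @total: Number of buffers on @blist
 * @blist: The list of gfs2_bufdata to write (tr_buf or tr_databuf)
 * @is_databuf: True for journaled data, which may need escaping
 *
 * The list is written in chunks of at most @limit buffers. Each chunk is
 * preceded by a log descriptor listing the in-place block number of every
 * buffer (plus an escape flag for journaled data), followed by the buffer
 * contents themselves.
 */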
619 static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
620 				unsigned int total, struct list_head *blist,
621 				bool is_databuf)
622 {
623 	struct gfs2_log_descriptor *ld;
624 	struct gfs2_bufdata *bd1 = NULL, *bd2;
625 	struct page *page;
626 	unsigned int num;
627 	unsigned n;
628 	__be64 *ptr;
629 
630 	gfs2_log_lock(sdp);
631 	list_sort(NULL, blist, blocknr_cmp);
632 	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
633 	while(total) {
634 		num = total;
635 		if (total > limit)
636 			num = limit;
637 		gfs2_log_unlock(sdp);
638 		page = gfs2_get_log_desc(sdp,
639 					 is_databuf ? GFS2_LOG_DESC_JDATA :
640 					 GFS2_LOG_DESC_METADATA, num + 1, num);
641 		ld = page_address(page);
642 		gfs2_log_lock(sdp);
643 		ptr = (__be64 *)(ld + 1);
644 
645 		n = 0;
646 		list_for_each_entry_continue(bd1, blist, bd_list) {
647 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
648 			if (is_databuf) {
649 				gfs2_check_magic(bd1->bd_bh);
650 				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
651 			}
652 			if (++n >= num)
653 				break;
654 		}
655 
656 		gfs2_log_unlock(sdp);
657 		gfs2_log_write_page(sdp, page);
658 		gfs2_log_lock(sdp);
659 
660 		n = 0;
661 		list_for_each_entry_continue(bd2, blist, bd_list) {
662 			get_bh(bd2->bd_bh);
663 			gfs2_log_unlock(sdp);
664 			lock_buffer(bd2->bd_bh);
665 
666 			if (buffer_escaped(bd2->bd_bh)) {
667 				void *kaddr;
668 				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
669 				ptr = page_address(page);
670 				kaddr = kmap_atomic(bd2->bd_bh->b_page);
671 				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
672 				       bd2->bd_bh->b_size);
673 				kunmap_atomic(kaddr);
674 				*(__be32 *)ptr = 0;
675 				clear_buffer_escaped(bd2->bd_bh);
676 				unlock_buffer(bd2->bd_bh);
677 				brelse(bd2->bd_bh);
678 				gfs2_log_write_page(sdp, page);
679 			} else {
680 				gfs2_log_write_bh(sdp, bd2->bd_bh);
681 			}
682 			gfs2_log_lock(sdp);
683 			if (++n >= num)
684 				break;
685 		}
686 
687 		BUG_ON(total < num);
688 		total -= num;
689 	}
690 	gfs2_log_unlock(sdp);
691 }
692 
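/*
 * The "buf" log operations below handle ordinary (metadata) buffers:
 * before the commit the pinned buffers are copied into the log, after the
 * commit they are unpinned, and the scan hooks replay non-revoked copies
 * back to their in-place locations during journal recovery.
 */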
693 static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
694 {
695 	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
696 	unsigned int nbuf;
697 	if (tr == NULL)
698 		return;
699 	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
700 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
701 }
702 
703 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
704 {
705 	struct list_head *head;
706 	struct gfs2_bufdata *bd;
707 
708 	if (tr == NULL)
709 		return;
710 
711 	head = &tr->tr_buf;
712 	while (!list_empty(head)) {
713 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
714 		list_del_init(&bd->bd_list);
715 		gfs2_unpin(sdp, bd->bd_bh, tr);
716 	}
717 }
718 
719 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
720 			       struct gfs2_log_header_host *head, int pass)
721 {
722 	if (pass != 0)
723 		return;
724 
725 	jd->jd_found_blocks = 0;
726 	jd->jd_replayed_blocks = 0;
727 }
728 
729 static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
730 				struct gfs2_log_descriptor *ld, __be64 *ptr,
731 				int pass)
732 {
733 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
734 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
735 	struct gfs2_glock *gl = ip->i_gl;
736 	unsigned int blks = be32_to_cpu(ld->ld_data1);
737 	struct buffer_head *bh_log, *bh_ip;
738 	u64 blkno;
739 	int error = 0;
740 
741 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
742 		return 0;
743 
744 	gfs2_replay_incr_blk(jd, &start);
745 
746 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
747 		blkno = be64_to_cpu(*ptr++);
748 
749 		jd->jd_found_blocks++;
750 
751 		if (gfs2_revoke_check(jd, blkno, start))
752 			continue;
753 
754 		error = gfs2_replay_read_block(jd, start, &bh_log);
755 		if (error)
756 			return error;
757 
758 		bh_ip = gfs2_meta_new(gl, blkno);
759 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
760 
761 		if (gfs2_meta_check(sdp, bh_ip))
762 			error = -EIO;
763 		else {
764 			struct gfs2_meta_header *mh =
765 				(struct gfs2_meta_header *)bh_ip->b_data;
766 
767 			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
768 				struct gfs2_rgrpd *rgd;
769 
770 				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
771 				if (rgd && rgd->rd_addr == blkno &&
772 				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
773 					fs_info(sdp, "Replaying 0x%llx but we "
774 						"already have a bh!\n",
775 						(unsigned long long)blkno);
776 					fs_info(sdp, "busy:%d, pinned:%d\n",
777 						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
778 						buffer_pinned(rgd->rd_bits->bi_bh));
779 					gfs2_dump_glock(NULL, rgd->rd_gl, true);
780 				}
781 			}
782 			mark_buffer_dirty(bh_ip);
783 		}
784 		brelse(bh_log);
785 		brelse(bh_ip);
786 
787 		if (error)
788 			break;
789 
790 		jd->jd_replayed_blocks++;
791 	}
792 
793 	return error;
794 }
795 
796 /**
797  * gfs2_meta_sync - Sync all buffers associated with a glock
798  * @gl: The glock
799  *
800  */
801 
802 static void gfs2_meta_sync(struct gfs2_glock *gl)
803 {
804 	struct address_space *mapping = gfs2_glock2aspace(gl);
805 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
806 	int error;
807 
808 	if (mapping == NULL)
809 		mapping = &sdp->sd_aspace;
810 
811 	filemap_fdatawrite(mapping);
812 	error = filemap_fdatawait(mapping);
813 
814 	if (error)
815 		gfs2_io_error(gl->gl_name.ln_sbd);
816 }
817 
818 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
819 {
820 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
821 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
822 
823 	if (error) {
824 		gfs2_meta_sync(ip->i_gl);
825 		return;
826 	}
827 	if (pass != 1)
828 		return;
829 
830 	gfs2_meta_sync(ip->i_gl);
831 
832 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
833 	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
834 }
835 
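/**
 * revoke_lo_before_commit - Write the pending revokes into the log
 * @sdp: The superblock
 * @tr: The system transaction being flushed (unused here)
 *
 * Revokes mark blocks whose older copies in the log must not be replayed.
 * They are written as packed 64-bit block numbers: the first block starts
 * with a log descriptor, and any continuation blocks start with a
 * GFS2_METATYPE_LB meta header.
 */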
836 static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
837 {
838 	struct gfs2_meta_header *mh;
839 	unsigned int offset;
840 	struct list_head *head = &sdp->sd_log_revokes;
841 	struct gfs2_bufdata *bd;
842 	struct page *page;
843 	unsigned int length;
844 
845 	gfs2_write_revokes(sdp);
846 	if (!sdp->sd_log_num_revoke)
847 		return;
848 
849 	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
850 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
851 	offset = sizeof(struct gfs2_log_descriptor);
852 
853 	list_for_each_entry(bd, head, bd_list) {
854 		sdp->sd_log_num_revoke--;
855 
856 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
857 
858 			gfs2_log_write_page(sdp, page);
859 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
860 			mh = page_address(page);
861 			clear_page(mh);
862 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
863 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
864 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
865 			offset = sizeof(struct gfs2_meta_header);
866 		}
867 
868 		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
869 		offset += sizeof(u64);
870 	}
871 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
872 
873 	gfs2_log_write_page(sdp, page);
874 }
875 
876 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
877 {
878 	struct list_head *head = &sdp->sd_log_revokes;
879 	struct gfs2_bufdata *bd;
880 	struct gfs2_glock *gl;
881 
882 	while (!list_empty(head)) {
883 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
884 		list_del_init(&bd->bd_list);
885 		gl = bd->bd_gl;
886 		gfs2_glock_remove_revoke(gl);
887 		kmem_cache_free(gfs2_bufdata_cachep, bd);
888 	}
889 }
890 
891 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
892 				  struct gfs2_log_header_host *head, int pass)
893 {
894 	if (pass != 0)
895 		return;
896 
897 	jd->jd_found_revokes = 0;
898 	jd->jd_replay_tail = head->lh_tail;
899 }
900 
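/*
 * Recovery pass 0: read every revoke block and record the revoked block
 * numbers (together with where in the log the revoke was found) so that
 * the later replay passes can skip stale copies of those blocks.
 */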
901 static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
902 				   struct gfs2_log_descriptor *ld, __be64 *ptr,
903 				   int pass)
904 {
905 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
906 	unsigned int blks = be32_to_cpu(ld->ld_length);
907 	unsigned int revokes = be32_to_cpu(ld->ld_data1);
908 	struct buffer_head *bh;
909 	unsigned int offset;
910 	u64 blkno;
911 	int first = 1;
912 	int error;
913 
914 	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
915 		return 0;
916 
917 	offset = sizeof(struct gfs2_log_descriptor);
918 
919 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
920 		error = gfs2_replay_read_block(jd, start, &bh);
921 		if (error)
922 			return error;
923 
924 		if (!first)
925 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
926 
927 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
928 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
929 
930 			error = gfs2_revoke_add(jd, blkno, start);
931 			if (error < 0) {
932 				brelse(bh);
933 				return error;
934 			}
935 			else if (error)
936 				jd->jd_found_revokes++;
937 
938 			if (!--revokes)
939 				break;
940 			offset += sizeof(u64);
941 		}
942 
943 		brelse(bh);
944 		offset = sizeof(struct gfs2_meta_header);
945 		first = 0;
946 	}
947 
948 	return 0;
949 }
950 
951 static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
952 {
953 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
954 
955 	if (error) {
956 		gfs2_revoke_clean(jd);
957 		return;
958 	}
959 	if (pass != 1)
960 		return;
961 
962 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
963 	        jd->jd_jid, jd->jd_found_revokes);
964 
965 	gfs2_revoke_clean(jd);
966 }
967 
968 /**
969  * databuf_lo_before_commit - Scan the data buffers, writing as we go
970  * @sdp: The filesystem
 * @tr: The system transaction being flushed
971  */
972 
973 static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
974 {
975 	unsigned int limit = databuf_limit(sdp);
976 	unsigned int nbuf;
977 	if (tr == NULL)
978 		return;
979 	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
980 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
981 }
982 
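/*
 * Recovery pass 1 handler for journaled data. This mirrors
 * buf_lo_scan_elements(), except that each entry carries an extra
 * "escaped" flag; when set, the first word of the replayed block is
 * restored to the GFS2 magic number.
 */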
983 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
984 				    struct gfs2_log_descriptor *ld,
985 				    __be64 *ptr, int pass)
986 {
987 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
988 	struct gfs2_glock *gl = ip->i_gl;
989 	unsigned int blks = be32_to_cpu(ld->ld_data1);
990 	struct buffer_head *bh_log, *bh_ip;
991 	u64 blkno;
992 	u64 esc;
993 	int error = 0;
994 
995 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
996 		return 0;
997 
998 	gfs2_replay_incr_blk(jd, &start);
999 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
1000 		blkno = be64_to_cpu(*ptr++);
1001 		esc = be64_to_cpu(*ptr++);
1002 
1003 		jd->jd_found_blocks++;
1004 
1005 		if (gfs2_revoke_check(jd, blkno, start))
1006 			continue;
1007 
1008 		error = gfs2_replay_read_block(jd, start, &bh_log);
1009 		if (error)
1010 			return error;
1011 
1012 		bh_ip = gfs2_meta_new(gl, blkno);
1013 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
1014 
1015 		/* Unescape */
1016 		if (esc) {
1017 			__be32 *eptr = (__be32 *)bh_ip->b_data;
1018 			*eptr = cpu_to_be32(GFS2_MAGIC);
1019 		}
1020 		mark_buffer_dirty(bh_ip);
1021 
1022 		brelse(bh_log);
1023 		brelse(bh_ip);
1024 
1025 		jd->jd_replayed_blocks++;
1026 	}
1027 
1028 	return error;
1029 }
1030 
1031 /* FIXME: sort out accounting for log blocks etc. */
1032 
1033 static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
1034 {
1035 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1036 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
1037 
1038 	if (error) {
1039 		gfs2_meta_sync(ip->i_gl);
1040 		return;
1041 	}
1042 	if (pass != 1)
1043 		return;
1044 
1045 	/* data sync? */
1046 	gfs2_meta_sync(ip->i_gl);
1047 
1048 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
1049 		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
1050 }
1051 
1052 static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1053 {
1054 	struct list_head *head;
1055 	struct gfs2_bufdata *bd;
1056 
1057 	if (tr == NULL)
1058 		return;
1059 
1060 	head = &tr->tr_databuf;
1061 	while (!list_empty(head)) {
1062 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
1063 		list_del_init(&bd->bd_list);
1064 		gfs2_unpin(sdp, bd->bd_bh, tr);
1065 	}
1066 }
1067 
1068 
1069 static const struct gfs2_log_operations gfs2_buf_lops = {
1070 	.lo_before_commit = buf_lo_before_commit,
1071 	.lo_after_commit = buf_lo_after_commit,
1072 	.lo_before_scan = buf_lo_before_scan,
1073 	.lo_scan_elements = buf_lo_scan_elements,
1074 	.lo_after_scan = buf_lo_after_scan,
1075 	.lo_name = "buf",
1076 };
1077 
1078 static const struct gfs2_log_operations gfs2_revoke_lops = {
1079 	.lo_before_commit = revoke_lo_before_commit,
1080 	.lo_after_commit = revoke_lo_after_commit,
1081 	.lo_before_scan = revoke_lo_before_scan,
1082 	.lo_scan_elements = revoke_lo_scan_elements,
1083 	.lo_after_scan = revoke_lo_after_scan,
1084 	.lo_name = "revoke",
1085 };
1086 
1087 static const struct gfs2_log_operations gfs2_databuf_lops = {
1088 	.lo_before_commit = databuf_lo_before_commit,
1089 	.lo_after_commit = databuf_lo_after_commit,
1090 	.lo_scan_elements = databuf_lo_scan_elements,
1091 	.lo_after_scan = databuf_lo_after_scan,
1092 	.lo_name = "databuf",
1093 };
1094 
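/*
 * The lops_*() helpers (see lops.h) iterate this table, so its order
 * determines the order in which the hooks run for each element type
 * during log flushes and journal recovery.
 */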
1095 const struct gfs2_log_operations *gfs2_log_ops[] = {
1096 	&gfs2_databuf_lops,
1097 	&gfs2_buf_lops,
1098 	&gfs2_revoke_lops,
1099 	NULL,
1100 };
1101 
1102