// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
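
/*
 * Editorial note on the pin/unpin lifecycle (a summary of this code, not
 * authoritative documentation): pinning a buffer at log flush time takes
 * an extra reference and keeps the in-place version from being written
 * back while the journaled copy is in flight.  gfs2_unpin() below then
 * re-dirties the buffer and moves it to the AIL1 list of the current
 * transaction, from where gfs2_ail1_flush() can write it to its in-place
 * location and eventually let the corresponding journal space be reused.
 */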

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}
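
/*
 * Clarifying note (derived from maybe_release_space() above): for a rgrp
 * glock, gl_name.ln_number is the address of the resource group, so the
 * buffer's block number minus that address indexes the matching
 * gfs2_bitmap in rgd->rd_bits.  Copying the just-pinned bitmap over
 * bi_clone makes blocks freed in the previous transaction allocatable
 * again, which is why rd_free_clone is reset to rd_free.
 */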

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}
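
/*
 * Example (hypothetical numbers): with jd_blocks == 8192 and
 * sd_log_flush_head == 8191, the increment above wraps the flush head
 * back to block 0.  The BUG_ON catches the flush head running into the
 * log tail, which would mean overwriting journal blocks that have not
 * yet been freed.
 */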

u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}
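
/*
 * Worked example (hypothetical extent list): given an extent with
 * lblock == 0, blocks == 100 and dblock == 5000, logical journal block
 * 42 maps to physical block 5000 + 42 - 0 == 5042.  A lookup past the
 * end of the extent list returns -1 cast to u64, which callers treat as
 * an error sentinel.
 */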

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}
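
/*
 * Note on the sector arithmetic above: sd_fsb2bb_shift converts
 * filesystem blocks to 512-byte basic blocks.  For example (hypothetical
 * geometry), with 4KiB filesystem blocks the shift is 3, so block 17
 * starts at sector 17 << 3 == 136.
 */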

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: Address of the cached bio pointer
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}
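
/*
 * Clarifying note: bio_end_sector() is the first sector past the data
 * already queued in the cached bio, so converting it back to a
 * filesystem block number and comparing it with blkno detects whether
 * the new block is physically contiguous with the previous one.  Only
 * contiguous blocks are merged into a single bio; anything else forces
 * a submit.
 */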

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
		       bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if the journal head was found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			      struct gfs2_log_header_host *head,
			      struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (*done == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}
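
/*
 * Note (based on bio_chain() semantics in block/bio.c): "new" is chained
 * as a child of "prev", so prev's end_io (which unlocks its pages) does
 * not run until the chained bio completes as well.  gfs2_find_jhead()
 * below uses this when a page straddles the end of a full bio: the
 * straddling page is only unlocked once the remainder of its data,
 * carried by "new", has been read too.
 */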

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache on return
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number.  (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;  /* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}
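
/*
 * Readahead sizing (derived from the constants above): max_blocks caps
 * outstanding read I/O at 2MiB, e.g. 512 blocks with a 4KiB block size.
 * The main loop keeps submitting bios until it is that far ahead of the
 * blocks already processed, then alternates between consuming completed
 * pages via gfs2_jhead_process_page() and submitting more, so the log
 * header search overlaps with the journal reads.
 */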

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}
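
/*
 * Why gfs2_check_magic() matters (explanatory note): a journaled data
 * block that happens to begin with GFS2_MAGIC would be indistinguishable
 * from log metadata during replay.  Such blocks are marked "escaped":
 * gfs2_before_commit() below writes them out with the first word zeroed
 * and records an escape flag next to the block number in the log
 * descriptor, and databuf_lo_scan_elements() restores the magic number
 * when the block is replayed.
 */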

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
				unsigned int total, struct list_head *blist,
				bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
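
/*
 * Capacity example (computed from the layout above, assuming 4KiB
 * blocks): a metadata descriptor holds (4096 - sizeof(struct
 * gfs2_log_descriptor)) / sizeof(__be64) block numbers, the "503 for 4k
 * blocks" that buf_limit() yields below.  Journaled data needs two
 * __be64s per entry (block number plus escape flag), so databuf_limit()
 * is roughly half that.  Each chunk written is num + 1 blocks long: one
 * descriptor block plus num payload blocks.
 */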

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}
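
/*
 * Replay summary (restating the loop above): each block number in the
 * descriptor is skipped if a later revoke covers it; otherwise the
 * journaled copy is read and copied over a fresh in-place buffer, which
 * is marked dirty so recovery can sync it out afterwards.  The rgrp
 * diagnostic in the middle only fires when a resource group being
 * replayed already has live buffers, a situation that should not occur
 * during journal recovery.
 */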

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
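
/*
 * Layout example (computed from the code above, assuming 4KiB blocks):
 * the first revoke block is a log descriptor, leaving room for (4096 -
 * sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revoke entries;
 * each continuation block carries only a gfs2_meta_header of type LB,
 * so it holds (4096 - sizeof(struct gfs2_meta_header)) / sizeof(u64)
 * entries.  gfs2_struct2blk() sizes the ld_length written above using a
 * matching calculation.
 */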

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
	        jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};
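
/*
 * Note on ordering (an observation, not documented behaviour): the
 * iterators in lops.h walk this NULL-terminated array in order, so for
 * each lo_* phase data buffers are processed before metadata buffers,
 * which are processed before revokes.
 */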