xref: /openbmc/linux/fs/gfs2/lops.c (revision f4686c26)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mempool.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <linux/bio.h>
18 #include <linux/fs.h>
19 #include <linux/list_sort.h>
20 #include <linux/blkdev.h>
21 
22 #include "bmap.h"
23 #include "dir.h"
24 #include "gfs2.h"
25 #include "incore.h"
26 #include "inode.h"
27 #include "glock.h"
28 #include "log.h"
29 #include "lops.h"
30 #include "meta_io.h"
31 #include "recovery.h"
32 #include "rgrp.h"
33 #include "trans.h"
34 #include "util.h"
35 #include "trace_gfs2.h"
36 
37 /**
38  * gfs2_pin - Pin a buffer in memory
39  * @sdp: The superblock
40  * @bh: The buffer to be pinned
41  *
42  * The log lock must be held when calling this function
43  */
44 void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
45 {
46 	struct gfs2_bufdata *bd;
47 
48 	BUG_ON(!current->journal_info);
49 
50 	clear_buffer_dirty(bh);
51 	if (test_set_buffer_pinned(bh))
52 		gfs2_assert_withdraw(sdp, 0);
53 	if (!buffer_uptodate(bh))
54 		gfs2_io_error_bh_wd(sdp, bh);
55 	bd = bh->b_private;
56 	/* If this buffer is in the AIL and it has already been written
57 	 * to its in-place disk block, remove it from the AIL.
58 	 */
59 	spin_lock(&sdp->sd_ail_lock);
60 	if (bd->bd_tr)
61 		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
62 	spin_unlock(&sdp->sd_ail_lock);
63 	get_bh(bh);
64 	atomic_inc(&sdp->sd_log_pinned);
65 	trace_gfs2_pin(bd, 1);
66 }
67 
68 static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
69 {
70 	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
71 }
72 
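/*
 * An rgrp keeps a clone of its bitmaps (bi_clone) so that blocks freed in
 * the current transaction still appear allocated until the log has been
 * written.  Once the bitmap buffer is unpinned, the real bitmap can be
 * copied back over the clone (issuing discard requests for the newly freed
 * blocks first if the filesystem is mounted with the discard option) and
 * the freed space made available for allocation again.
 */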
73 static void maybe_release_space(struct gfs2_bufdata *bd)
74 {
75 	struct gfs2_glock *gl = bd->bd_gl;
76 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
77 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
78 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
79 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
80 
81 	if (bi->bi_clone == NULL)
82 		return;
83 	if (sdp->sd_args.ar_discard)
84 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
85 	memcpy(bi->bi_clone + bi->bi_offset,
86 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
87 	clear_bit(GBF_FULL, &bi->bi_flags);
88 	rgd->rd_free_clone = rgd->rd_free;
89 	rgd->rd_extfail_pt = rgd->rd_free;
90 }
91 
92 /**
93  * gfs2_unpin - Unpin a buffer
94  * @sdp: the filesystem the buffer belongs to
95  * @bh: The buffer to unpin
96  * @tr: The system transaction being flushed
98  *
99  */
100 
101 static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
102 		       struct gfs2_trans *tr)
103 {
104 	struct gfs2_bufdata *bd = bh->b_private;
105 
106 	BUG_ON(!buffer_uptodate(bh));
107 	BUG_ON(!buffer_pinned(bh));
108 
109 	lock_buffer(bh);
110 	mark_buffer_dirty(bh);
111 	clear_buffer_pinned(bh);
112 
113 	if (buffer_is_rgrp(bd))
114 		maybe_release_space(bd);
115 
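	/*
	 * The buffer is dirty again and must be written back in place, so it
	 * goes on the current transaction's AIL1 list.  The reference taken
	 * in gfs2_pin() is handed over to the AIL; if the buffer was already
	 * on an AIL list, the old AIL reference is dropped below.
	 */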
116 	spin_lock(&sdp->sd_ail_lock);
117 	if (bd->bd_tr) {
118 		list_del(&bd->bd_ail_st_list);
119 		brelse(bh);
120 	} else {
121 		struct gfs2_glock *gl = bd->bd_gl;
122 		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
123 		atomic_inc(&gl->gl_ail_count);
124 	}
125 	bd->bd_tr = tr;
126 	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
127 	spin_unlock(&sdp->sd_ail_lock);
128 
129 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
130 	trace_gfs2_pin(bd, 0);
131 	unlock_buffer(bh);
132 	atomic_dec(&sdp->sd_log_pinned);
133 }
134 
135 static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
136 {
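	/*
	 * The flush head must not catch up with the log tail while the tail
	 * still contains live journal data; the two may only coincide when
	 * the log is empty (head == tail).
	 */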
137 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
138 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
139 
140 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
141 		sdp->sd_log_flush_head = 0;
142 }
143 
144 u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
145 {
146 	unsigned int lbn = sdp->sd_log_flush_head;
147 	struct gfs2_journal_extent *je;
148 	u64 block;
149 
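	/*
	 * Translate the journal-relative flush head into an absolute disk
	 * block number using the journal's extent map, and advance the head.
	 */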
150 	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
151 		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
152 			block = je->dblock + lbn - je->lblock;
153 			gfs2_log_incr_head(sdp);
154 			return block;
155 		}
156 	}
157 
158 	return -1;
159 }
160 
161 /**
162  * gfs2_end_log_write_bh - end log write of pagecache data with buffers
163  * @sdp: The superblock
164  * @bvec: The bio_vec
165  * @error: The i/o status
166  *
167  * This finds the relevant buffers and unlocks them and sets the
168  * error flag according to the status of the i/o request. This is
169  * used when the log is writing data which has an in-place version
170  * that is pinned in the pagecache.
171  */
172 
173 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
174 				  struct bio_vec *bvec,
175 				  blk_status_t error)
176 {
177 	struct buffer_head *bh, *next;
178 	struct page *page = bvec->bv_page;
179 	unsigned size;
180 
181 	bh = page_buffers(page);
182 	size = bvec->bv_len;
183 	while (bh_offset(bh) < bvec->bv_offset)
184 		bh = bh->b_this_page;
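	/* Walk the buffers covered by this segment, propagating any I/O
	 * error and dropping the reference taken when the log write was
	 * submitted. */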
185 	do {
186 		if (error)
187 			mark_buffer_write_io_error(bh);
188 		unlock_buffer(bh);
189 		next = bh->b_this_page;
190 		size -= bh->b_size;
191 		brelse(bh);
192 		bh = next;
193 	} while(bh && size);
194 }
195 
196 /**
197  * gfs2_end_log_write - end of i/o to the log
198  * @bio: The bio
199  *
200  * Each bio_vec contains either data from the pagecache or data
201  * relating to the log itself. Here we iterate over the bio_vec
202  * array, processing both kinds of data.
203  *
204  */
205 
206 static void gfs2_end_log_write(struct bio *bio)
207 {
208 	struct gfs2_sbd *sdp = bio->bi_private;
209 	struct bio_vec *bvec;
210 	struct page *page;
211 	int i;
212 	struct bvec_iter_all iter_all;
213 
214 	if (bio->bi_status) {
215 		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
216 		       bio->bi_status, sdp->sd_jdesc->jd_jid);
217 		wake_up(&sdp->sd_logd_waitq);
218 	}
219 
220 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
221 		page = bvec->bv_page;
222 		if (page_has_buffers(page))
223 			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
224 		else
225 			mempool_free(page, gfs2_page_pool);
226 	}
227 
228 	bio_put(bio);
229 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
230 		wake_up(&sdp->sd_log_flush_wait);
231 }
232 
233 /**
234  * gfs2_log_submit_bio - Submit any pending log bio
235  * @biop: Address of the bio pointer
236  * @opf: REQ_OP | op_flags
237  *
238  * Submit any pending part-built or full bio to the block device. If
239  * there is no pending bio, then this is a no-op.
240  */
241 
242 void gfs2_log_submit_bio(struct bio **biop, int opf)
243 {
244 	struct bio *bio = *biop;
245 	if (bio) {
246 		struct gfs2_sbd *sdp = bio->bi_private;
247 		atomic_inc(&sdp->sd_log_in_flight);
248 		bio->bi_opf = opf;
249 		submit_bio(bio);
250 		*biop = NULL;
251 	}
252 }
253 
254 /**
255  * gfs2_log_alloc_bio - Allocate a bio
256  * @sdp: The super block
257  * @blkno: The device block number we want to write to
258  * @end_io: The bi_end_io callback
259  *
260  * Allocate a new bio, initialize it with the given parameters and return it.
261  *
262  * Returns: The newly allocated bio
263  */
264 
265 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
266 				      bio_end_io_t *end_io)
267 {
268 	struct super_block *sb = sdp->sd_vfs;
269 	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
270 
271 	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
272 	bio_set_dev(bio, sb->s_bdev);
273 	bio->bi_end_io = end_io;
274 	bio->bi_private = sdp;
275 
276 	return bio;
277 }
278 
279 /**
280  * gfs2_log_get_bio - Get cached log bio, or allocate a new one
281  * @sdp: The super block
282  * @blkno: The device block number we want to write to
283  * @biop: Address of the bio pointer
284  * @op: REQ_OP
285  * @end_io: The bi_end_io callback
286  * @flush: Always flush the current bio and allocate a new one?
287  *
288  * If there is a cached bio, then if the next block number is sequential
289  * with the previous one, return it, otherwise flush the bio to the
290  * device. If there is no cached bio, or we just flushed it, then
291  * allocate a new one.
292  *
293  * Returns: The bio to use for log writes
294  */
295 
296 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
297 				    struct bio **biop, int op,
298 				    bio_end_io_t *end_io, bool flush)
299 {
300 	struct bio *bio = *biop;
301 
302 	if (bio) {
303 		u64 nblk;
304 
305 		nblk = bio_end_sector(bio);
306 		nblk >>= sdp->sd_fsb2bb_shift;
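		/*
		 * nblk is now the filesystem block just past the end of the
		 * cached bio; if it matches blkno, the new write is
		 * contiguous and can share the bio.
		 */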
307 		if (blkno == nblk && !flush)
308 			return bio;
309 		gfs2_log_submit_bio(biop, op);
310 	}
311 
312 	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
313 	return *biop;
314 }
315 
316 /**
317  * gfs2_log_write - write to log
318  * @sdp: the filesystem
319  * @page: the page to write
320  * @size: the size of the data to write
321  * @offset: the offset within the page
322  * @blkno: block number of the log entry
323  *
324  * Try and add the page segment to the current bio. If that fails,
325  * submit the current bio to the device and create a new one, and
326  * then add the page segment to that.
327  */
328 
329 void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
330 		    unsigned size, unsigned offset, u64 blkno)
331 {
332 	struct bio *bio;
333 	int ret;
334 
335 	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
336 			       gfs2_end_log_write, false);
337 	ret = bio_add_page(bio, page, size, offset);
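	/* bio_add_page() returns 0 when the segment does not fit in the
	 * current bio; submit that bio and retry with a freshly allocated
	 * one. */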
338 	if (ret == 0) {
339 		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
340 				       REQ_OP_WRITE, gfs2_end_log_write, true);
341 		ret = bio_add_page(bio, page, size, offset);
342 		WARN_ON(ret == 0);
343 	}
344 }
345 
346 /**
347  * gfs2_log_write_bh - write a buffer's content to the log
348  * @sdp: The super block
349  * @bh: The buffer pointing to the in-place location
350  *
351  * This writes the content of the buffer to the next available location
352  * in the log. The buffer will be unlocked once the i/o to the log has
353  * completed.
354  */
355 
356 static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
357 {
358 	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
359 		       gfs2_log_bmap(sdp));
360 }
361 
362 /**
363  * gfs2_log_write_page - write one block stored in a page, into the log
364  * @sdp: The superblock
365  * @page: The struct page
366  *
367  * This writes the first block-sized part of the page into the log. Note
368  * that the page must have been allocated from the gfs2_page_pool mempool
369  * and that after this has been called, ownership has been transferred and
370  * the page may be freed at any time.
371  */
372 
373 void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
374 {
375 	struct super_block *sb = sdp->sd_vfs;
376 	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
377 		       gfs2_log_bmap(sdp));
378 }
379 
380 /**
381  * gfs2_end_log_read - end I/O callback for reads from the log
382  * @bio: The bio
383  *
384  * Simply unlock the pages in the bio. The main thread will wait on them and
385  * process them in order as necessary.
386  */
387 
388 static void gfs2_end_log_read(struct bio *bio)
389 {
390 	struct page *page;
391 	struct bio_vec *bvec;
392 	int i;
393 	struct bvec_iter_all iter_all;
394 
395 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
396 		page = bvec->bv_page;
397 		if (bio->bi_status) {
398 			int err = blk_status_to_errno(bio->bi_status);
399 
400 			SetPageError(page);
401 			mapping_set_error(page->mapping, err);
402 		}
403 		unlock_page(page);
404 	}
405 
406 	bio_put(bio);
407 }
408 
409 /**
410  * gfs2_jhead_pg_srch - Look for the journal head in a given page.
411  * @jd: The journal descriptor
 * @head: The journal head to start from
412  * @page: The page to look in
413  *
414  * Returns: true if found, false otherwise.
415  */
416 
417 static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
418 			      struct gfs2_log_header_host *head,
419 			      struct page *page)
420 {
421 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
422 	struct gfs2_log_header_host uninitialized_var(lh);
423 	void *kaddr = kmap_atomic(page);
424 	unsigned int offset;
425 	bool ret = false;
426 
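	/*
	 * Log header sequence numbers increase around the journal, so keep
	 * scanning while each header's sequence is larger than the best one
	 * seen so far; the first header that is not marks the end of the
	 * search.
	 */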
427 	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
428 		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
429 			if (lh.lh_sequence > head->lh_sequence)
430 				*head = lh;
431 			else {
432 				ret = true;
433 				break;
434 			}
435 		}
436 	}
437 	kunmap_atomic(kaddr);
438 	return ret;
439 }
440 
441 /**
442  * gfs2_jhead_process_page - Search/cleanup a page
443  * @jd: The journal descriptor
444  * @index: Index of the page to look into
 * @head: The journal head to start from
445  * @done: If set, perform only cleanup, else search and set if found.
446  *
447  * Find the page with 'index' in the journal's mapping. Search the page for
448  * the journal head if requested (*done == false). Release refs on the
449  * page so the page cache can reclaim it (put_page() twice). We grabbed a
450  * reference on this page two times, first when we did a find_or_create_page()
451  * to obtain the page to add it to the bio and second when we do a
452  * find_get_page() here to get the page to wait on while I/O on it is being
453  * completed.
454  * This function is also used to free up a page we might've grabbed but not
455  * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
456  * submitted the I/O, but we already found the jhead so we only need to drop
457  * our references to the page.
458  */
459 
460 static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
461 				    struct gfs2_log_header_host *head,
462 				    bool *done)
463 {
464 	struct page *page;
465 
466 	page = find_get_page(jd->jd_inode->i_mapping, index);
467 	wait_on_page_locked(page);
468 
469 	if (PageError(page))
470 		*done = true;
471 
472 	if (!*done)
473 		*done = gfs2_jhead_pg_srch(jd, head, page);
474 
475 	put_page(page); /* Once for find_get_page */
476 	put_page(page); /* Once more for find_or_create_page */
477 }
478 
479 /**
480  * gfs2_find_jhead - find the head of a log
481  * @jd: The journal descriptor
482  * @head: The log descriptor for the head of the log is returned here
483  * @head: The log header for the head of the log is returned here
 * @keep_cache: If set, the journal's page cache is left intact on return
484  * Do a search of a journal by reading it in large chunks using bios and find
485  * the valid log entry with the highest sequence number.  (i.e. the log head)
486  *
487  * Returns: 0 on success, errno otherwise
488  */
489 int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
490 		    bool keep_cache)
491 {
492 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
493 	struct address_space *mapping = jd->jd_inode->i_mapping;
494 	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
495 	unsigned int bsize = sdp->sd_sb.sb_bsize;
496 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
497 	unsigned int shift = PAGE_SHIFT - bsize_shift;
498 	unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
499 	struct gfs2_journal_extent *je;
500 	int sz, ret = 0;
501 	struct bio *bio = NULL;
502 	struct page *page = NULL;
503 	bool done = false;
504 	errseq_t since;
505 
506 	memset(head, 0, sizeof(*head));
507 	if (list_empty(&jd->extent_list))
508 		gfs2_map_journal_extents(sdp, jd);
509 
510 	since = filemap_sample_wb_err(mapping);
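	/*
	 * Read the journal through its page cache in large, pipelined bios:
	 * keep a readahead window of read I/O in flight and examine
	 * completed pages in order until the head is found.
	 */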
511 	list_for_each_entry(je, &jd->extent_list, list) {
512 		for (; block < je->lblock + je->blocks; block++) {
513 			u64 dblock;
514 
515 			if (!page) {
516 				page = find_or_create_page(mapping,
517 						block >> shift, GFP_NOFS);
518 				if (!page) {
519 					ret = -ENOMEM;
520 					done = true;
521 					goto out;
522 				}
523 			}
524 
525 			if (bio) {
526 				unsigned int off;
527 
528 				off = (block << bsize_shift) & ~PAGE_MASK;
529 				sz = bio_add_page(bio, page, bsize, off);
530 				if (sz == bsize) { /* block added */
531 					if (off + bsize == PAGE_SIZE) {
532 						page = NULL;
533 						goto page_added;
534 					}
535 					continue;
536 				}
537 				blocks_submitted = block + 1;
538 				submit_bio(bio);
539 				bio = NULL;
540 			}
541 
542 			dblock = je->dblock + (block - je->lblock);
543 			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
544 			bio->bi_opf = REQ_OP_READ;
545 			sz = bio_add_page(bio, page, bsize, 0);
546 			gfs2_assert_warn(sdp, sz == bsize);
547 			if (bsize == PAGE_SIZE)
548 				page = NULL;
549 
550 page_added:
551 			if (blocks_submitted < blocks_read + readahead_blocks) {
552 				/* Keep at least one bio in flight */
553 				continue;
554 			}
555 
556 			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
557 			blocks_read += PAGE_SIZE >> bsize_shift;
558 			if (done)
559 				goto out;  /* found */
560 		}
561 	}
562 
563 out:
564 	if (bio)
565 		submit_bio(bio);
566 	while (blocks_read < block) {
567 		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
568 		blocks_read += PAGE_SIZE >> bsize_shift;
569 	}
570 
571 	if (!ret)
572 		ret = filemap_check_wb_err(mapping, since);
573 
574 	if (!keep_cache)
575 		truncate_inode_pages(mapping, 0);
576 
577 	return ret;
578 }
579 
580 static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
581 				      u32 ld_length, u32 ld_data1)
582 {
583 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
584 	struct gfs2_log_descriptor *ld = page_address(page);
585 	clear_page(ld);
586 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
587 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
588 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
589 	ld->ld_type = cpu_to_be32(ld_type);
590 	ld->ld_length = cpu_to_be32(ld_length);
591 	ld->ld_data1 = cpu_to_be32(ld_data1);
592 	ld->ld_data2 = 0;
593 	return page;
594 }
595 
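/*
 * Journaled data blocks that happen to begin with GFS2_MAGIC have to be
 * "escaped" before being written to the log, so that log replay cannot
 * mistake them for metadata.  This marks such buffers; gfs2_before_commit()
 * zeroes the magic in the log copy and records the escape flag in the log
 * descriptor, and databuf_lo_scan_elements() restores the magic on replay.
 */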
596 static void gfs2_check_magic(struct buffer_head *bh)
597 {
598 	void *kaddr;
599 	__be32 *ptr;
600 
601 	clear_buffer_escaped(bh);
602 	kaddr = kmap_atomic(bh->b_page);
603 	ptr = kaddr + bh_offset(bh);
604 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
605 		set_buffer_escaped(bh);
606 	kunmap_atomic(kaddr);
607 }
608 
609 static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
610 {
611 	struct gfs2_bufdata *bda, *bdb;
612 
613 	bda = list_entry(a, struct gfs2_bufdata, bd_list);
614 	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
615 
616 	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
617 		return -1;
618 	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
619 		return 1;
620 	return 0;
621 }
622 
623 static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
624 				unsigned int total, struct list_head *blist,
625 				bool is_databuf)
626 {
627 	struct gfs2_log_descriptor *ld;
628 	struct gfs2_bufdata *bd1 = NULL, *bd2;
629 	struct page *page;
630 	unsigned int num;
631 	unsigned n;
632 	__be64 *ptr;
633 
634 	gfs2_log_lock(sdp);
635 	list_sort(NULL, blist, blocknr_cmp);
636 	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
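	/*
	 * Process the list in chunks of at most 'limit' buffers.  For each
	 * chunk, first write a log descriptor listing the block numbers (plus
	 * an escape flag for journaled data), then write the buffer contents
	 * themselves, copying escaped buffers through a scratch page with the
	 * magic number zeroed out.
	 */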
637 	while(total) {
638 		num = total;
639 		if (total > limit)
640 			num = limit;
641 		gfs2_log_unlock(sdp);
642 		page = gfs2_get_log_desc(sdp,
643 					 is_databuf ? GFS2_LOG_DESC_JDATA :
644 					 GFS2_LOG_DESC_METADATA, num + 1, num);
645 		ld = page_address(page);
646 		gfs2_log_lock(sdp);
647 		ptr = (__be64 *)(ld + 1);
648 
649 		n = 0;
650 		list_for_each_entry_continue(bd1, blist, bd_list) {
651 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
652 			if (is_databuf) {
653 				gfs2_check_magic(bd1->bd_bh);
654 				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
655 			}
656 			if (++n >= num)
657 				break;
658 		}
659 
660 		gfs2_log_unlock(sdp);
661 		gfs2_log_write_page(sdp, page);
662 		gfs2_log_lock(sdp);
663 
664 		n = 0;
665 		list_for_each_entry_continue(bd2, blist, bd_list) {
666 			get_bh(bd2->bd_bh);
667 			gfs2_log_unlock(sdp);
668 			lock_buffer(bd2->bd_bh);
669 
670 			if (buffer_escaped(bd2->bd_bh)) {
671 				void *kaddr;
672 				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
673 				ptr = page_address(page);
674 				kaddr = kmap_atomic(bd2->bd_bh->b_page);
675 				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
676 				       bd2->bd_bh->b_size);
677 				kunmap_atomic(kaddr);
678 				*(__be32 *)ptr = 0;
679 				clear_buffer_escaped(bd2->bd_bh);
680 				unlock_buffer(bd2->bd_bh);
681 				brelse(bd2->bd_bh);
682 				gfs2_log_write_page(sdp, page);
683 			} else {
684 				gfs2_log_write_bh(sdp, bd2->bd_bh);
685 			}
686 			gfs2_log_lock(sdp);
687 			if (++n >= num)
688 				break;
689 		}
690 
691 		BUG_ON(total < num);
692 		total -= num;
693 	}
694 	gfs2_log_unlock(sdp);
695 }
696 
697 static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
698 {
699 	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
700 	unsigned int nbuf;
701 	if (tr == NULL)
702 		return;
703 	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
704 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
705 }
706 
707 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
708 {
709 	struct list_head *head;
710 	struct gfs2_bufdata *bd;
711 
712 	if (tr == NULL)
713 		return;
714 
715 	head = &tr->tr_buf;
716 	while (!list_empty(head)) {
717 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
718 		list_del_init(&bd->bd_list);
719 		gfs2_unpin(sdp, bd->bd_bh, tr);
720 	}
721 }
722 
723 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
724 			       struct gfs2_log_header_host *head, int pass)
725 {
726 	if (pass != 0)
727 		return;
728 
729 	jd->jd_found_blocks = 0;
730 	jd->jd_replayed_blocks = 0;
731 }
732 
733 static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
734 				struct gfs2_log_descriptor *ld, __be64 *ptr,
735 				int pass)
736 {
737 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
738 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
739 	struct gfs2_glock *gl = ip->i_gl;
740 	unsigned int blks = be32_to_cpu(ld->ld_data1);
741 	struct buffer_head *bh_log, *bh_ip;
742 	u64 blkno;
743 	int error = 0;
744 
745 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
746 		return 0;
747 
748 	gfs2_replay_incr_blk(jd, &start);
749 
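	/*
	 * Replay each metadata block listed in the descriptor: skip blocks
	 * with an outstanding revoke, otherwise copy the log copy over the
	 * in-place block after a metadata header check.
	 */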
750 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
751 		blkno = be64_to_cpu(*ptr++);
752 
753 		jd->jd_found_blocks++;
754 
755 		if (gfs2_revoke_check(jd, blkno, start))
756 			continue;
757 
758 		error = gfs2_replay_read_block(jd, start, &bh_log);
759 		if (error)
760 			return error;
761 
762 		bh_ip = gfs2_meta_new(gl, blkno);
763 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
764 
765 		if (gfs2_meta_check(sdp, bh_ip))
766 			error = -EIO;
767 		else
768 			mark_buffer_dirty(bh_ip);
769 
770 		brelse(bh_log);
771 		brelse(bh_ip);
772 
773 		if (error)
774 			break;
775 
776 		jd->jd_replayed_blocks++;
777 	}
778 
779 	return error;
780 }
781 
782 /**
783  * gfs2_meta_sync - Sync all buffers associated with a glock
784  * @gl: The glock
785  *
786  */
787 
788 static void gfs2_meta_sync(struct gfs2_glock *gl)
789 {
790 	struct address_space *mapping = gfs2_glock2aspace(gl);
791 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
792 	int error;
793 
794 	if (mapping == NULL)
795 		mapping = &sdp->sd_aspace;
796 
797 	filemap_fdatawrite(mapping);
798 	error = filemap_fdatawait(mapping);
799 
800 	if (error)
801 		gfs2_io_error(gl->gl_name.ln_sbd);
802 }
803 
804 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
805 {
806 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
807 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
808 
809 	if (error) {
810 		gfs2_meta_sync(ip->i_gl);
811 		return;
812 	}
813 	if (pass != 1)
814 		return;
815 
816 	gfs2_meta_sync(ip->i_gl);
817 
818 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
819 	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
820 }
821 
822 static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
823 {
824 	struct gfs2_meta_header *mh;
825 	unsigned int offset;
826 	struct list_head *head = &sdp->sd_log_revokes;
827 	struct gfs2_bufdata *bd;
828 	struct page *page;
829 	unsigned int length;
830 
831 	gfs2_write_revokes(sdp);
832 	if (!sdp->sd_log_num_revoke)
833 		return;
834 
835 	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
836 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
837 	offset = sizeof(struct gfs2_log_descriptor);
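	/*
	 * Pack the revoked block numbers into the descriptor page; each time
	 * it fills up, write it to the log and continue in a fresh
	 * GFS2_METATYPE_LB continuation block.
	 */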
838 
839 	list_for_each_entry(bd, head, bd_list) {
840 		sdp->sd_log_num_revoke--;
841 
842 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
843 
844 			gfs2_log_write_page(sdp, page);
845 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
846 			mh = page_address(page);
847 			clear_page(mh);
848 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
849 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
850 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
851 			offset = sizeof(struct gfs2_meta_header);
852 		}
853 
854 		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
855 		offset += sizeof(u64);
856 	}
857 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
858 
859 	gfs2_log_write_page(sdp, page);
860 }
861 
862 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
863 {
864 	struct list_head *head = &sdp->sd_log_revokes;
865 	struct gfs2_bufdata *bd, *tmp;
866 
867 	/*
868 	 * Glocks can be referenced repeatedly on the revoke list, but the list
869 	 * only holds one reference.  All glocks on the list will have the
870 	 * GLF_REVOKES flag set initially.
871 	 */
872 
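	/* The first pass frees the duplicate entries, leaving one entry per
	 * glock; the second pass frees those and drops the reference the
	 * revoke list held on each glock. */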
873 	list_for_each_entry_safe(bd, tmp, head, bd_list) {
874 		struct gfs2_glock *gl = bd->bd_gl;
875 
876 		if (test_bit(GLF_REVOKES, &gl->gl_flags)) {
877 			/* Keep each glock on the list exactly once. */
878 			clear_bit(GLF_REVOKES, &gl->gl_flags);
879 			continue;
880 		}
881 		list_del(&bd->bd_list);
882 		kmem_cache_free(gfs2_bufdata_cachep, bd);
883 	}
884 	list_for_each_entry_safe(bd, tmp, head, bd_list) {
885 		struct gfs2_glock *gl = bd->bd_gl;
886 
887 		list_del(&bd->bd_list);
888 		kmem_cache_free(gfs2_bufdata_cachep, bd);
889 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
890 		gfs2_glock_queue_put(gl);
891 	}
892 	/* the list is empty now */
893 }
894 
895 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
896 				  struct gfs2_log_header_host *head, int pass)
897 {
898 	if (pass != 0)
899 		return;
900 
901 	jd->jd_found_revokes = 0;
902 	jd->jd_replay_tail = head->lh_tail;
903 }
904 
905 static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
906 				   struct gfs2_log_descriptor *ld, __be64 *ptr,
907 				   int pass)
908 {
909 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
910 	unsigned int blks = be32_to_cpu(ld->ld_length);
911 	unsigned int revokes = be32_to_cpu(ld->ld_data1);
912 	struct buffer_head *bh;
913 	unsigned int offset;
914 	u64 blkno;
915 	int first = 1;
916 	int error;
917 
918 	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
919 		return 0;
920 
921 	offset = sizeof(struct gfs2_log_descriptor);
922 
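	/*
	 * Walk the revoke blocks, adding each revoked block number to the
	 * journal's revoke table so that the later replay passes can skip
	 * stale copies via gfs2_revoke_check().
	 */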
923 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
924 		error = gfs2_replay_read_block(jd, start, &bh);
925 		if (error)
926 			return error;
927 
928 		if (!first)
929 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
930 
931 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
932 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
933 
934 			error = gfs2_revoke_add(jd, blkno, start);
935 			if (error < 0) {
936 				brelse(bh);
937 				return error;
938 			}
939 			else if (error)
940 				jd->jd_found_revokes++;
941 
942 			if (!--revokes)
943 				break;
944 			offset += sizeof(u64);
945 		}
946 
947 		brelse(bh);
948 		offset = sizeof(struct gfs2_meta_header);
949 		first = 0;
950 	}
951 
952 	return 0;
953 }
954 
955 static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
956 {
957 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
958 
959 	if (error) {
960 		gfs2_revoke_clean(jd);
961 		return;
962 	}
963 	if (pass != 1)
964 		return;
965 
966 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
967 	        jd->jd_jid, jd->jd_found_revokes);
968 
969 	gfs2_revoke_clean(jd);
970 }
971 
972 /**
973  * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
974  *
975  */
976 
977 static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
978 {
979 	unsigned int limit = databuf_limit(sdp);
980 	unsigned int nbuf;
981 	if (tr == NULL)
982 		return;
983 	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
984 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
985 }
986 
987 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
988 				    struct gfs2_log_descriptor *ld,
989 				    __be64 *ptr, int pass)
990 {
991 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
992 	struct gfs2_glock *gl = ip->i_gl;
993 	unsigned int blks = be32_to_cpu(ld->ld_data1);
994 	struct buffer_head *bh_log, *bh_ip;
995 	u64 blkno;
996 	u64 esc;
997 	int error = 0;
998 
999 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
1000 		return 0;
1001 
1002 	gfs2_replay_incr_blk(jd, &start);
1003 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
1004 		blkno = be64_to_cpu(*ptr++);
1005 		esc = be64_to_cpu(*ptr++);
1006 
1007 		jd->jd_found_blocks++;
1008 
1009 		if (gfs2_revoke_check(jd, blkno, start))
1010 			continue;
1011 
1012 		error = gfs2_replay_read_block(jd, start, &bh_log);
1013 		if (error)
1014 			return error;
1015 
1016 		bh_ip = gfs2_meta_new(gl, blkno);
1017 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
1018 
1019 		/* Unescape */
1020 		if (esc) {
1021 			__be32 *eptr = (__be32 *)bh_ip->b_data;
1022 			*eptr = cpu_to_be32(GFS2_MAGIC);
1023 		}
1024 		mark_buffer_dirty(bh_ip);
1025 
1026 		brelse(bh_log);
1027 		brelse(bh_ip);
1028 
1029 		jd->jd_replayed_blocks++;
1030 	}
1031 
1032 	return error;
1033 }
1034 
1035 /* FIXME: sort out accounting for log blocks etc. */
1036 
1037 static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
1038 {
1039 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1040 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
1041 
1042 	if (error) {
1043 		gfs2_meta_sync(ip->i_gl);
1044 		return;
1045 	}
1046 	if (pass != 1)
1047 		return;
1048 
1049 	/* data sync? */
1050 	gfs2_meta_sync(ip->i_gl);
1051 
1052 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
1053 		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
1054 }
1055 
1056 static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1057 {
1058 	struct list_head *head;
1059 	struct gfs2_bufdata *bd;
1060 
1061 	if (tr == NULL)
1062 		return;
1063 
1064 	head = &tr->tr_databuf;
1065 	while (!list_empty(head)) {
1066 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
1067 		list_del_init(&bd->bd_list);
1068 		gfs2_unpin(sdp, bd->bd_bh, tr);
1069 	}
1070 }
1071 
1072 
1073 static const struct gfs2_log_operations gfs2_buf_lops = {
1074 	.lo_before_commit = buf_lo_before_commit,
1075 	.lo_after_commit = buf_lo_after_commit,
1076 	.lo_before_scan = buf_lo_before_scan,
1077 	.lo_scan_elements = buf_lo_scan_elements,
1078 	.lo_after_scan = buf_lo_after_scan,
1079 	.lo_name = "buf",
1080 };
1081 
1082 static const struct gfs2_log_operations gfs2_revoke_lops = {
1083 	.lo_before_commit = revoke_lo_before_commit,
1084 	.lo_after_commit = revoke_lo_after_commit,
1085 	.lo_before_scan = revoke_lo_before_scan,
1086 	.lo_scan_elements = revoke_lo_scan_elements,
1087 	.lo_after_scan = revoke_lo_after_scan,
1088 	.lo_name = "revoke",
1089 };
1090 
1091 static const struct gfs2_log_operations gfs2_databuf_lops = {
1092 	.lo_before_commit = databuf_lo_before_commit,
1093 	.lo_after_commit = databuf_lo_after_commit,
1094 	.lo_scan_elements = databuf_lo_scan_elements,
1095 	.lo_after_scan = databuf_lo_after_scan,
1096 	.lo_name = "databuf",
1097 };
1098 
1099 const struct gfs2_log_operations *gfs2_log_ops[] = {
1100 	&gfs2_databuf_lops,
1101 	&gfs2_buf_lops,
1102 	&gfs2_revoke_lops,
1103 	NULL,
1104 };
1105 
1106