/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

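/*
 * maybe_release_space - resync a resource group's clone bitmap
 * @bd: The rgrp bitmap buffer being unpinned
 *
 * If this bitmap block has a clone, copy the buffer contents back over
 * the clone (issuing discards first when the fs is mounted with the
 * "discard" option), clear the GBF_FULL hint and bring rd_free_clone
 * back in line with rd_free.
 */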
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == 0)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

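/*
 * gfs2_log_incr_head - advance the log flush head by one block
 * @sdp: The superblock
 *
 * Wraps back to block 0 at the end of the journal and notes the wrap in
 * sd_log_flush_wrapped.  BUGs if the flush head has reached the log tail
 * without having caught up with the log head, since continuing would
 * overwrite live log data.
 */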
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

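/*
 * gfs2_log_bmap - map the current flush head to a physical block number
 * @sdp: The superblock
 *
 * Translates the journal-relative flush head into an absolute device
 * block using the journal's extent list, then advances the flush head.
 * Returns -1 (as a u64) if the block falls outside every extent.
 */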
static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers, unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  int error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
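	/*
	 * Buffers on the page are chained via b_this_page; skip forward to
	 * the first buffer covered by this bio_vec, then complete each
	 * buffer that falls within the bvec's length.
	 */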
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			set_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio, int error)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (error) {
		sdp->sd_log_error = error;
		fs_err(sdp, "Error %d writing to log\n", error);
	}

	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, error);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		submit_bio(rw, sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	while (1) {
		bio = bio_alloc(GFP_NOIO, nrvecs);
		if (likely(bio))
			break;
		nrvecs = max(nrvecs/2, 1U);
	}

	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
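		/*
		 * bio_end_sector() gives the first 512-byte sector after the
		 * pending bio; shifting by sd_fsb2bb_shift converts that to a
		 * filesystem block number, so blkno == nblk means this write
		 * is contiguous and can simply be appended to the bio.
		 */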
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, WRITE);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}


/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try to add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
			   unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, WRITE);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

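/*
 * gfs2_get_log_desc - allocate and initialise a log descriptor block
 * @sdp: The superblock
 * @ld_type: The GFS2_LOG_DESC_* type to store in the descriptor
 * @ld_length: The value for the descriptor's ld_length field (in blocks)
 * @ld_data1: The value for the type-specific ld_data1 field
 *
 * The page comes from the gfs2_page_pool mempool; ownership passes to
 * gfs2_log_write_page() when the caller writes it out.
 */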
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

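/*
 * gfs2_check_magic - test whether a data buffer needs to be "escaped"
 * @bh: The data buffer
 *
 * A journalled data block whose first word happens to equal GFS2_MAGIC
 * could be mistaken for metadata when the log is scanned, so such a
 * buffer is flagged here; it is written to the log with that word zeroed
 * and databuf_lo_scan_elements() restores the magic number on replay.
 */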
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

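/*
 * gfs2_before_commit - write a list of pinned buffers into the log
 * @sdp: The superblock
 * @limit: Maximum number of buffers described by one log descriptor
 * @total: The number of buffers on @blist
 * @blist: The list of gfs2_bufdata items to write
 * @is_databuf: True for journalled data, false for metadata
 *
 * For each chunk of up to @limit buffers, a log descriptor listing the
 * in-place block numbers (plus a per-block escape flag for journalled
 * data) is written, followed by the buffer contents themselves.  The log
 * lock is dropped around the actual I/O submissions.
 */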
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
				unsigned int total, struct list_head *blist,
				bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

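			/*
			 * An escaped buffer is copied into a scratch page with
			 * its first word zeroed, so that the log copy cannot be
			 * mistaken for a metadata block; replay restores the
			 * magic number using the escape flag written above.
			 */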
			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
			   &sdp->sd_log_le_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	if (tr == NULL) {
		gfs2_assert(sdp, list_empty(head));
		return;
	}

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
	        jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

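/*
 * revoke_lo_before_commit - write the pending revoke list into the log
 * @sdp: The superblock
 *
 * Revokes are packed as big-endian 64-bit block numbers directly after
 * the log descriptor header.  When one log block fills up, it is written
 * out and a fresh page with a GFS2_METATYPE_LB header is started for the
 * remaining entries.
 */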
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

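/*
 * revoke_lo_scan_elements - pick up revokes during journal replay (pass 0)
 *
 * Reads each block of the revoke descriptor, checking the GFS2_METATYPE_LB
 * header on continuation blocks, and records every revoked block number
 * with gfs2_revoke_add() so that the pass 1 replay code can skip them.
 */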
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
	        jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp) / 2;

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
			   &sdp->sd_log_le_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	if (tr == NULL) {
		gfs2_assert(sdp, list_empty(head));
		return;
	}

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}


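/*
 * Each gfs2_log_operations table ties one class of log element to its
 * commit-time writer and its journal-replay hooks; entries left NULL have
 * no hook for that phase.  gfs2_log_ops is the NULL-terminated array that
 * the generic lops helpers (see lops.h) iterate over.
 */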
const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};